{
"source": "a09hopper/django-loci",
"score": 2
}
#### File: django_loci/base/admin.py
```python
import json
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core.exceptions import ValidationError
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from leaflet.admin import LeafletGeoAdmin
from openwisp_utils.admin import TimeReadonlyAdminMixin
from ..fields import GeometryField
from ..widgets import FloorPlanWidget, ImageWidget
from .models import AbstractLocation
class AbstractFloorPlanForm(forms.ModelForm):
class Meta:
exclude = tuple()
widgets = {'image': ImageWidget()}
class Media:
css = {'all': ('django-loci/css/loci.css',)}
class AbstractFloorPlanAdmin(TimeReadonlyAdminMixin, admin.ModelAdmin):
list_display = ['__str__', 'location', 'floor', 'created', 'modified']
list_select_related = ['location']
search_fields = ['location__name']
raw_id_fields = ['location']
save_on_top = True
class AbstractLocationForm(forms.ModelForm):
class Meta:
exclude = tuple()
class Media:
js = ('django-loci/js/loci.js',
'django-loci/js/floorplan-inlines.js',)
css = {'all': ('django-loci/css/loci.css',)}
class AbstractFloorPlanInline(TimeReadonlyAdminMixin, admin.StackedInline):
extra = 0
ordering = ('floor',)
class AbstractLocationAdmin(TimeReadonlyAdminMixin, LeafletGeoAdmin):
list_display = ['name', 'short_type', 'is_mobile', 'created', 'modified']
search_fields = ['name', 'address']
list_filter = ['type', 'is_mobile']
save_on_top = True
def get_urls(self):
# hardcoding django_loci as the prefix for the
# view names makes it much easier to extend
# without having to change templates
app_label = 'django_loci'
return [
url(r'^(?P<pk>[^/]+)/json/$',
self.admin_site.admin_view(self.json_view),
name='{0}_location_json'.format(app_label)),
url(r'^(?P<pk>[^/]+)/floorplans/json/$',
self.admin_site.admin_view(self.floorplans_json_view),
name='{0}_location_floorplans_json'.format(app_label))
] + super(AbstractLocationAdmin, self).get_urls()
def json_view(self, request, pk):
instance = get_object_or_404(self.model, pk=pk)
return JsonResponse({
'name': instance.name,
'type': instance.type,
'is_mobile': instance.is_mobile,
'address': instance.address,
'geometry': json.loads(instance.geometry.json)
})
def floorplans_json_view(self, request, pk):
instance = get_object_or_404(self.model, pk=pk)
choices = []
for floorplan in instance.floorplan_set.all():
choices.append({
'id': floorplan.pk,
'str': str(floorplan),
'floor': floorplan.floor,
'image': floorplan.image.url,
'image_width': floorplan.image.width,
'image_height': floorplan.image.height,
})
return JsonResponse({'choices': choices})
class UnvalidatedChoiceField(forms.ChoiceField):
"""
skips ChoiceField validation to allow custom options
"""
def validate(self, value):
super(forms.ChoiceField, self).validate(value)
_get_field = AbstractLocation._meta.get_field
class AbstractObjectLocationForm(forms.ModelForm):
FORM_CHOICES = (
('', _('--- Please select an option ---')),
('new', _('New')),
('existing', _('Existing'))
)
LOCATION_TYPES = (
FORM_CHOICES[0],
AbstractLocation.LOCATION_TYPES[0],
AbstractLocation.LOCATION_TYPES[1]
)
location_selection = forms.ChoiceField(choices=FORM_CHOICES, required=False)
name = forms.CharField(label=_('Location name'),
max_length=75, required=False,
help_text=_get_field('name').help_text)
address = forms.CharField(max_length=128, required=False)
type = forms.ChoiceField(choices=LOCATION_TYPES, required=True,
help_text=_get_field('type').help_text)
is_mobile = forms.BooleanField(label=_get_field('is_mobile').verbose_name,
help_text=_get_field('is_mobile').help_text,
required=False)
geometry = GeometryField(required=False)
floorplan_selection = forms.ChoiceField(required=False,
choices=FORM_CHOICES)
floorplan = UnvalidatedChoiceField(choices=((None, FORM_CHOICES[0][1]),),
required=False)
floor = forms.IntegerField(required=False)
image = forms.ImageField(required=False,
widget=ImageWidget(thumbnail=False),
help_text=_('floor plan image'))
indoor = forms.CharField(max_length=64, required=False,
label=_('indoor position'),
widget=FloorPlanWidget)
class Meta:
exclude = tuple()
class Media:
js = ('django-loci/js/loci.js',)
css = {'all': ('django-loci/css/loci.css',)}
def __init__(self, *args, **kwargs):
super(AbstractObjectLocationForm, self).__init__(*args, **kwargs)
# set initial values for custom fields
initial = {}
obj = self.instance
location = obj.location
floorplan = obj.floorplan
if location:
initial.update({
'location_selection': 'existing',
'type': location.type,
'is_mobile': location.is_mobile,
'name': location.name,
'address': location.address,
'geometry': location.geometry,
})
if floorplan:
initial.update({
'floorplan_selection': 'existing',
'floorplan': floorplan.pk,
'floor': floorplan.floor,
'image': floorplan.image
})
floorplan_choices = self.fields['floorplan'].choices
self.fields['floorplan'].choices = floorplan_choices + [(floorplan.pk, floorplan)]
self.initial.update(initial)
@cached_property
def floorplan_model(self):
return self.Meta.model.floorplan.field.remote_field.model
@cached_property
def location_model(self):
return self.Meta.model.location.field.remote_field.model
def clean_floorplan(self):
floorplan_model = self.floorplan_model
if self.cleaned_data['type'] != 'indoor' or self.cleaned_data['floorplan_selection'] == 'new':
return None
pk = self.cleaned_data['floorplan']
if not pk:
raise ValidationError(_('No floorplan selected'))
try:
fl = floorplan_model.objects.get(pk=pk)
except floorplan_model.DoesNotExist:
raise ValidationError(_('Selected floorplan does not exist'))
if fl.location != self.cleaned_data['location']:
raise ValidationError(_('This floorplan is associated to a different location'))
return fl
def clean(self):
data = self.cleaned_data
type_ = data['type']
is_mobile = data['is_mobile']
msg = _('this field is required for locations of type %(type)s')
fields = []
if not is_mobile and type_ in ['outdoor', 'indoor']:
fields += ['location_selection', 'name', 'address', 'geometry']
if not is_mobile and type_ == 'indoor':
fields += ['floorplan_selection', 'floor', 'indoor']
if data.get('floorplan_selection') == 'existing':
fields.append('floorplan')
elif data.get('floorplan_selection') == 'new':
fields.append('image')
elif is_mobile and not data.get('location'):
data['name'] = ''
data['address'] = ''
data['geometry'] = ''
data['location_selection'] = 'new'
for field in fields:
if field in data and data[field] in [None, '']:
params = {'type': type_}
err = ValidationError(msg, params=params)
self.add_error(field, err)
def _get_location_instance(self):
data = self.cleaned_data
location = data.get('location') or self.location_model()
location.type = data.get('type') or location.type
location.is_mobile = data.get('is_mobile') or location.is_mobile
location.name = data.get('name') or location.name
location.address = data.get('address') or location.address
location.geometry = data.get('geometry') or location.geometry
return location
def _get_floorplan_instance(self):
data = self.cleaned_data
instance = self.instance
floorplan = data.get('floorplan') or self.floorplan_model()
floorplan.location = instance.location
floor = data.get('floor')
floorplan.floor = floor if floor is not None else floorplan.floor
# the image path is updated only during creation
# or if the image has been actually changed
if data.get('image') and self.initial.get('image') != data.get('image'):
floorplan.image = data['image']
return floorplan
def save(self, commit=True):
instance = self.instance
data = self.cleaned_data
# create or update location
instance.location = self._get_location_instance()
# set name of mobile locations automatically
if data['is_mobile'] and not instance.location.name:
instance.location.name = str(self.instance.content_object)
instance.location.save()
# create or update floorplan
if data['type'] == 'indoor':
instance.floorplan = self._get_floorplan_instance()
instance.floorplan.save()
# call super
return super(AbstractObjectLocationForm, self).save(commit=True)
class ObjectLocationMixin(TimeReadonlyAdminMixin):
"""
Base ObjectLocationInline logic; it can be imported and
mixed in with different inline classes (stacked, tabular).
If you need the generic inline, see AbstractObjectLocationInline below.
"""
verbose_name = _('geographic information')
verbose_name_plural = verbose_name
raw_id_fields = ('location',)
max_num = 1
extra = 1
template = 'admin/django_loci/location_inline.html'
fieldsets = (
(None, {'fields': ('location_selection',)}),
('Geographic coordinates', {
'classes': ('loci', 'coords'),
'fields': ('location', 'type', 'is_mobile',
'name', 'address', 'geometry'),
}),
('Indoor coordinates', {
'classes': ('indoor', 'coords'),
'fields': ('floorplan_selection', 'floorplan',
'floor', 'image', 'indoor',),
})
)
class AbstractObjectLocationInline(ObjectLocationMixin, GenericStackedInline):
"""
Generic Inline + ObjectLocationMixin
"""
pass
```
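The admin classes above are deliberately abstract and ship without a `model`; a concrete project is expected to subclass them and register its own models. Below is a minimal, hypothetical wiring sketch (the `Location`/`FloorPlan` model names and the importing app are assumptions for illustration, not part of this file):

```python
# Hypothetical wiring sketch: binds the abstract admin/form classes above to
# concrete models. Location and FloorPlan are assumed to exist in the local app.
from django.contrib import admin

from django_loci.base.admin import (
    AbstractFloorPlanAdmin, AbstractFloorPlanForm,
    AbstractLocationAdmin, AbstractLocationForm,
)

from .models import FloorPlan, Location


class FloorPlanForm(AbstractFloorPlanForm):
    class Meta(AbstractFloorPlanForm.Meta):
        model = FloorPlan


class FloorPlanAdmin(AbstractFloorPlanAdmin):
    form = FloorPlanForm


class LocationForm(AbstractLocationForm):
    class Meta(AbstractLocationForm.Meta):
        model = Location


class LocationAdmin(AbstractLocationAdmin):
    form = LocationForm


admin.site.register(FloorPlan, FloorPlanAdmin)
admin.site.register(Location, LocationAdmin)
```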
{
"source": "a09hopper/openwisp-network-topology",
"score": 2
}
#### File: openwisp-network-topology/openwisp_network_topology/apps.py
```python
from django.conf import settings
from django_netjsongraph.apps import DjangoNetjsongraphConfig
class OpenwispNetworkTopologyConfig(DjangoNetjsongraphConfig):
name = 'openwisp_network_topology'
label = 'topology'
def ready(self, *args, **kwargs):
super(OpenwispNetworkTopologyConfig, self).ready(*args, **kwargs)
self.add_default_menu_items()
def add_default_menu_items(self):
menu_setting = 'OPENWISP_DEFAULT_ADMIN_MENU_ITEMS'
items = [
{'model': 'topology.Topology'}
]
if not hasattr(settings, menu_setting):
setattr(settings, menu_setting, items)
else:
current_menu = getattr(settings, menu_setting)
current_menu += items
```
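A brief, hedged note on the effect of `add_default_menu_items()`: if the project settings do not define `OPENWISP_DEFAULT_ADMIN_MENU_ITEMS`, the attribute is created with the topology entry; otherwise the entry is appended in place. The check below is illustrative only and assumes a configured Django project in which this app's `ready()` has already run:

```python
# Illustrative check; project settings and app loading are assumptions here.
from django.conf import settings

menu = getattr(settings, 'OPENWISP_DEFAULT_ADMIN_MENU_ITEMS', [])
assert {'model': 'topology.Topology'} in menu
```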
{
"source": "A0nameless0man/Heng-Client",
"score": 3
}
#### File: Heng-Client/selfTest/src.py
```python
def main():
sum = 0
for i in range(10000000):
sum = i ^ sum
print(sum)
main()
```
{
"source": "A0vanc01/Frisky",
"score": 3
}
#### File: Frisky/learns/models.py
```python
from random import randint
from typing import Optional
from django.db import models
from django.db.models import Count
class LearnManager(models.Manager):
def count_for_label(self, label):
results = self.get_queryset().filter(label=label) \
.values('label') \
.annotate(total=Count('label'))
if len(results) == 0:
return None
return results[0]
def label_counts(self):
return self.get_queryset() \
.values('label') \
.annotate(total=Count('label')) \
.order_by('-total')
def for_label(self, label):
return self.get_queryset().filter(label__iexact=label)
def for_label_indexed(self, label: str, index: int):
learns = self.for_label(label)
if index < 0:
# Django QuerySets don't support negative indexing, so we'll convert to a positive
# index by adding the count() of the returned rows.
# ie, for a size of 3 and index of -1 -> 3 + -1 = 2, which is the last element.
count = learns.aggregate(count=Count('id'))['count']
index += count
return learns[int(index)]
def random(self, label=None):
if label is None:
all_learns = self.get_queryset().all()
count = all_learns.aggregate(count=Count('id'))['count']
if count == 0:
return None
random_index = randint(0, count - 1)
return all_learns[random_index]
learns = self.for_label(label)
count = learns.aggregate(count=Count('id'))['count']
random_index = randint(0, count - 1)
return learns[random_index]
def add(self, label: str, content: str) -> bool:
"""
:param label:
:param content:
:return: True if the record was created
"""
if not self.get_queryset().filter(label=label, content=content).exists():
self.get_queryset().create(label=label, content=content)
return True
return False
def search(self, query: str, label: Optional[str] = None):
if label is None:
return self.get_queryset().filter(content__icontains=query)
return self.get_queryset().filter(label=label, content__icontains=query)
class Learn(models.Model):
label = models.CharField(max_length=50, db_index=True)
content = models.CharField(max_length=2000)
objects = LearnManager()
def __str__(self):
return f'{self.label}: "{self.content}"'
```
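A short, hypothetical usage sketch of `LearnManager` (assumes a configured Django project with this app installed); it illustrates the negative-index conversion described in the comment inside `for_label_indexed`:

```python
# Hypothetical usage; label/content values are made up for illustration.
from learns.models import Learn

Learn.objects.add('greeting', 'hello')    # -> True (row created)
Learn.objects.add('greeting', 'hola')     # -> True
Learn.objects.add('greeting', 'hello')    # -> False (duplicate)

# Negative index counts back from the end: with 2 rows, -1 -> 2 + (-1) = 1.
Learn.objects.for_label_indexed('greeting', -1).content
# usually 'hola' (no explicit ordering is applied)
```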
#### File: Frisky/learns/tests.py
```python
from django.test import TestCase
from learns.models import Learn
class LearnModelTestCase(TestCase):
def test_tostring(self):
learn = Learn(label='foo', content='bar')
self.assertEqual(str(learn), 'foo: "bar"')
```
#### File: Frisky/plugins/help.py
```python
from frisky.events import MessageEvent
from frisky.plugin import FriskyPlugin, PluginRepositoryMixin
from frisky.responses import FriskyResponse
class HelpPlugin(FriskyPlugin, PluginRepositoryMixin):
commands = ['help']
def command_help(self, message: MessageEvent) -> FriskyResponse:
if len(message.args) == 1:
plugin_name = message.args[0]
if plugin_name == 'help':
return 'Usage: `?help` or `?help <plugin_name>`'
plugin = self.get_plugin_by_name(plugin_name)
if plugin is None:
return f'No such plugin: `{plugin_name}`, try `?help` to list installed plugins'
if (help_text := plugin.help_text()) is None:
return f'Plugin `{plugin_name}` does not provide help text.'
return help_text
plugins = self.get_plugin_names()
joined_string = ', '.join(plugins)
return f'Available plugins: {joined_string}'
```
#### File: Frisky/plugins/stock.py
```python
from typing import Optional, Tuple
import requests
from frisky.events import MessageEvent
from frisky.plugin import FriskyPlugin
from frisky.responses import FriskyResponse
class StockPlugin(FriskyPlugin):
@classmethod
def register_commands(cls) -> Tuple:
return 'stock', 'stonk'
@classmethod
def help_text(cls) -> Optional[str]:
return 'Usage: ?stock $SYMBOL'
def format_url(self, symbol):
return f'https://query1.finance.yahoo.com/v8/finance/chart/{symbol}?range=1d&includePrePost=false&interval=2m'
def format_money(self, money, currency):
if currency == 'USD':
if money >= 0:
return f'${money:.2f}'
else:
money *= -1
return f'-${money:.2f}'
raise NotImplementedError(f'Unsupported currency: {currency}')
def get_chart_emoji(self, is_positive):
if is_positive:
return ':chart_with_upwards_trend:'
else:
return ':chart_with_downwards_trend:'
def handle_message(self, message: MessageEvent) -> FriskyResponse:
if len(message.args) < 1:
return self.help_text()
symbol = message.args[0]
url = self.format_url(symbol)
response = requests.get(url)
if response.status_code == 200:
json = response.json()
result = json['chart']['result'][0]
currency = result['meta']['currency']
last_close = result['meta']['previousClose']
trades = result['indicators']['quote'][0]['close']
if len(trades):
last_trade = trades.pop()
daily_change = last_trade - last_close
percentage_change = 100 * daily_change / last_close
is_positive = daily_change > 0
return f'{self.get_chart_emoji(is_positive)} {symbol} last traded at ' \
f'{self.format_money(last_trade, currency)} ' \
f'({self.format_money(daily_change, currency)} {percentage_change:.2f}%)'
else:
close_msg = f'{symbol} last closed at {self.format_money(last_close, currency)}'
return close_msg
```
#### File: Frisky/scores/models.py
```python
from typing import Dict
from django.db import models
class GameManager(models.Manager):
def delete_all_named(self, name):
self.filter(name=name).delete()
def create_named(self, name, starting_score, participants):
self.delete_all_named(name)
game = self.create(name=name)
for entrant in participants:
game.scores.create(
name=entrant,
value=starting_score
)
return game
def get_named(self, name):
try:
return self.get(name=name)
except Game.DoesNotExist:
return None
class Game(models.Model):
name = models.CharField(max_length=100)
objects = GameManager()
def get_all_scores(self) -> Dict:
result = {}
for score in self.scores.all():
result[score.name] = score.value
return result
def alter_score(self, name: str, delta: int):
score = self.scores.get(name=name)
score.value += delta
score.save()
class Score(models.Model):
game = models.ForeignKey(Game, on_delete=models.CASCADE, related_name='scores')
name = models.CharField(max_length=50)
value = models.IntegerField()
```
#### File: slack/api/models.py
```python
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from dataclasses_json import DataClassJsonMixin
class BaseModel(DataClassJsonMixin):
def key(self):
if hasattr(self, 'id'):
return self.create_key(getattr(self, 'id'))
return None
@classmethod
def create_key(cls, *args):
return cls.__name__ + ':' + ':'.join(args)
@classmethod
def create(cls, obj):
if isinstance(obj, dict):
return cls.from_dict(obj)
if isinstance(obj, str):
return cls.from_json(obj)
if isinstance(obj, list):
return [cls.create(item) for item in obj]
return None
@dataclass
class Profile(BaseModel):
""" Example Profile
{
"avatar_hash": "ge3b51ca72de",
"status_text": "Print is dead",
"status_emoji": ":books:",
"real_name": "<NAME>",
"display_name": "spengler",
"real_name_normalized": "<NAME>",
"display_name_normalized": "spengler",
"email": "<EMAIL>",
"image_original": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"team": "T012AB3C4"
}
"""
real_name: str
display_name: str
real_name_normalized: str
display_name_normalized: str
team: str
@dataclass
class User(BaseModel):
""" Example API User:
{
"id": "W012A3CDE",
"team_id": "T012AB3C4",
"name": "spengler",
"deleted": false,
"color": "9f69e7",
"real_name": "<NAME>",
"tz": "America/Los_Angeles",
"tz_label": "Pacific Daylight Time",
"tz_offset": -25200,
"profile": {
"avatar_hash": "ge3b51ca72de",
"status_text": "Print is dead",
"status_emoji": ":books:",
"real_name": "<NAME>",
"display_name": "spengler",
"real_name_normalized": "<NAME>",
"display_name_normalized": "spengler",
"email": "<EMAIL>",
"image_original": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
"team": "T012AB3C4"
},
"is_admin": true,
"is_owner": false,
"is_primary_owner": false,
"is_restricted": false,
"is_ultra_restricted": false,
"is_bot": false,
"updated": 1502138686,
"is_app_user": false,
"has_2fa": false
}
"""
id: str
name: str
real_name: str
team_id: str
profile: Profile
def get_short_name(self):
if self.profile is not None:
name = self.profile.display_name_normalized
if name is not None and name != '':
return name
name = self.profile.real_name_normalized
if name is not None and name != '':
return name
if self.name is not None and self.name != '':
return self.name
return 'unknown'
@dataclass
class Conversation(BaseModel):
id: str
name: Optional[str] = None
is_channel: bool = False
is_group: bool = False
is_private: bool = False
is_im: bool = False
@dataclass
class Team(BaseModel):
id: str
name: str
domain: str
@dataclass
class File(BaseModel):
id: str
permalink: str
@dataclass
class Message(BaseModel):
user: str
text: str
ts: str
files: Optional[List[File]] = None
@dataclass
class RateLimitedEvent(DataClassJsonMixin):
"""
{
"token": "<KEY>",
"type": "app_rate_limited",
"team_id": "T123456",
"minute_rate_limited": 1518467820,
"api_app_id": "A123456"
}
"""
token: str
type: str
team_id: str
minute_rate_limited: int
api_app_id: str
@dataclass
class Event(BaseModel):
"""
The Slack Event wrapper. An example:
{
"token": "<KEY>",
"team_id": "T061EG9RZ",
"api_app_id": "A0FFV41KK",
"event": {
"type": "reaction_added",
"user": "U061F1EUR",
"item": {
"type": "message",
"channel": "C061EG9SL",
"ts": "1464196127.000002"
},
"reaction": "slightly_smiling_face",
"item_user": "U0M4RL1NY",
"event_ts": "1465244570.336841"
},
"type": "event_callback",
"authed_users": [
"U061F7AUR"
],
"event_id": "Ev9UQ52YNA",
"event_time": 1234567890
}
"""
token: str
team_id: str
api_app_id: str
event: Dict[str, Any]
type: str
authed_users: List[str]
event_id: str
event_time: int
def get_event(self):
event_type = self.event.get('type', None)
subtype = self.event.get('subtype', None)
if event_type == 'reaction_added' or event_type == 'reaction_removed':
return ReactionAdded.from_dict(self.event)
elif event_type == 'message' and subtype != 'message_changed':
return MessageSent.from_dict(self.event)
return None
@dataclass
class ReactionItem(DataClassJsonMixin):
"""
{
"type": "message",
"channel": "C061EG9SL",
"ts": "1464196127.000002"
}
"""
type: str
channel: str
ts: str
@dataclass
class ReactionAdded(BaseModel):
"""
{
"type": "reaction_added",
"user": "U061F1EUR",
"item": {
"type": "message",
"channel": "C061EG9SL",
"ts": "1464196127.000002"
},
"reaction": "slightly_smiling_face",
"item_user": "U0M4RL1NY",
"event_ts": "1465244570.336841"
}
"""
type: str
user: str
item: ReactionItem
reaction: str
item_user: str
event_ts: str
@dataclass
class MessageSent(BaseModel):
""" Example Event:
{
"type": "message",
"channel": "C024BE91L",
"user": "U2147483697",
"text": "Live long and prospect.",
"ts": "1355517523.000005",
"event_ts": "1355517523.000005",
"channel_type": "channel"
}
"""
channel: str
user: str
text: str
ts: str
event_ts: str
channel_type: str
```
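These dataclasses are deserialized via `dataclasses_json`; below is a hedged sketch of how an incoming payload might be parsed and dispatched. The field values are taken from the docstring examples above, and the module path is inferred from the file header:

```python
# Illustrative parse/dispatch sketch; the import path is an assumption.
from slack.api.models import Event, MessageSent

payload = {
    "token": "xxx",
    "team_id": "T061EG9RZ",
    "api_app_id": "A0FFV41KK",
    "event": {
        "type": "message",
        "channel": "C024BE91L",
        "user": "U2147483697",
        "text": "Live long and prospect.",
        "ts": "1355517523.000005",
        "event_ts": "1355517523.000005",
        "channel_type": "channel",
    },
    "type": "event_callback",
    "authed_users": ["U061F7AUR"],
    "event_id": "Ev9UQ52YNA",
    "event_time": 1234567890,
}

event = Event.from_dict(payload)   # provided by DataClassJsonMixin
inner = event.get_event()          # dispatches on the inner event "type"
assert isinstance(inner, MessageSent)
assert inner.text == "Live long and prospect."
```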
#### File: Frisky/slack/test_tasks.py
```python
from typing import Callable
from unittest import mock
import responses
from django.core.management import call_command
from django.test import TestCase
from frisky.events import ReactionEvent, MessageEvent
from .api.models import ReactionAdded, ReactionItem, MessageSent
from .api.tests import URL
from .api.tests import USER_OK
from .tasks import sanitize_message_text, handle_reaction_event, handle_message_event
conversation = """
{
"ok": true,
"channel": {
"id": "C012AB3CD",
"name": "general",
"is_channel": true,
"is_group": false,
"is_im": false,
"created": 1449252889,
"creator": "W012A3BCD",
"is_archived": false,
"is_general": true,
"unlinked": 0,
"name_normalized": "general",
"is_read_only": false,
"is_shared": false,
"parent_conversation": null,
"is_ext_shared": false,
"is_org_shared": false,
"pending_shared": [],
"is_pending_ext_shared": false,
"is_member": true,
"is_private": false,
"is_mpim": false,
"last_read": "1502126650.228446",
"topic": {
"value": "For public discussion of generalities",
"creator": "W012A3BCD",
"last_set": 1449709364
},
"purpose": {
"value": "This part of the workspace is for fun. Make fun here.",
"creator": "W012A3BCD",
"last_set": 1449709364
},
"previous_names": [
"specifics",
"abstractions",
"etc"
],
"locale": "en-US"
}
}
"""
message = """
{
"ok": true,
"latest": "1512085950.000216",
"messages": [
{
"type": "message",
"user": "U012AB3CDE",
"text": "I find you punny and would like to smell your nose letter",
"ts": "1512085950.000216"
}
],
"has_more": true,
"pin_count": 0,
"response_metadata": {
"next_cursor": "bmV4dF90czoxNTEyMzU2NTI2MDAwMTMw"
}
}
"""
class EventHandlingTestCase(TestCase):
def test_username_substitution(self):
with responses.RequestsMock() as rm:
rm.add('GET', f'{URL}/users.info?user=W012A3CDE', body=USER_OK)
result = sanitize_message_text('<@W012A3CDE> is a jerk')
self.assertEqual('spengler is a jerk', result)
def test_handle_message(self):
expected = MessageEvent(
username='spengler',
channel_name='general',
text='?I like to :poop:',
)
result = None
def mock_handle_message(_, message: MessageEvent, reply_channel: Callable[[str], bool]):
nonlocal result
result = message
patcher = mock.patch(target='frisky.bot.Frisky.handle_message', new=mock_handle_message)
with responses.RequestsMock() as rm:
rm.add('GET', f'{URL}/users.info?user=W012A3CDE', body=USER_OK)
rm.add('GET', f'{URL}/conversations.info?channel=123', body=conversation)
try:
patcher.start()
handle_message_event(MessageSent(
channel='123',
user='W012A3CDE',
text='?I like to :poop:',
ts='123',
event_ts='123',
channel_type='channel'
))
self.assertEqual(expected, result)
finally:
patcher.stop()
def test_handle_reaction(self):
expected = ReactionEvent(
emoji='poop',
username='spengler',
added=True,
message=MessageEvent(
username='spengler',
channel_name='general',
text='I find you punny and would like to smell your nose letter',
)
)
result = None
def mock_handle_reaction(_, reaction: ReactionEvent, reply_channel: Callable[[str], bool]):
nonlocal result
result = reaction
patcher = mock.patch(target='frisky.bot.Frisky.handle_reaction', new=mock_handle_reaction)
with responses.RequestsMock() as rm:
rm.add('GET', f'{URL}/users.info?user=W012A3CDE', body=USER_OK)
rm.add('GET', f'{URL}/conversations.info?channel=123', body=conversation)
api = f'{URL}/conversations.history?channel=C012AB3CD&oldest=123&latest=123&inclusive=true&limit=1'
rm.add('GET', api, body=message)
try:
patcher.start()
handle_reaction_event(event=ReactionAdded(
type='reaction_added',
user='W012A3CDE',
item=ReactionItem(
type='message',
channel='123',
ts='123'
),
reaction='poop',
item_user='W012A3CDE',
event_ts='123'
))
self.assertEqual(expected, result)
finally:
patcher.stop()
class SlackCliTestCase(TestCase):
def test(self):
result = None
def mock_handle_message(_, message: MessageEvent, reply_channel: Callable[[str], bool]):
nonlocal result
result = message
patcher = mock.patch(target='frisky.bot.Frisky.handle_message', new=mock_handle_message)
with responses.RequestsMock() as rm:
rm.add('POST', f'{URL}/chat.postMessage')
try:
patcher.start()
call_command('friskcli', 'ping')
self.assertEqual(b'{"channel": null, "text": "pong"}', rm.calls[0].request.body)
self.assertIsNone(result)
finally:
patcher.stop()
```
#### File: Frisky/tests/test_pipe.py
```python
import responses
from frisky.responses import Image
from frisky.test import FriskyTestCase
from plugins.meme import MemePlugin
class PipeTestCase(FriskyTestCase):
def test_pipe(self):
self.send_message('?learn foo bar')
self.send_message('?pipe foo | upvote')
reply = self.send_message('?votes bar')
self.assertTrue(reply.startswith('bar has 1 '))
def test_piping_memes(self):
self.send_message('?learn foo This Test Passed')
self.send_message('?memealias goodnews 123456')
with responses.RequestsMock() as rm:
rm.add('POST', MemePlugin.CAPTION_IMAGE_URL, body='''
{
"success": true,
"data": {
"url": "https://i.imgflip.com/123abc.jpg",
"page_url": "https://imgflip.com/i/123abc"
}
}
'''.strip())
reply = self.send_message('?pipe foo | meme goodnews "Good News Everyone"')
self.assertEqual(rm.calls[0].request.body,
'template_id=123456&username=&password=&text0=Good+News+Everyone&' +
'text1=This+Test+Passed')
self.assertIsInstance(reply, Image)
self.assertEqual(reply.url, 'https://i.imgflip.com/123abc.jpg')
```
#### File: Frisky/tests/test_roll.py
```python
from unittest import mock
from frisky.test import FriskyTestCase
class RollTestCase(FriskyTestCase):
def test_roll_defaults_to_d20(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 10)
patcher.start()
self.assertEqual('dummyuser rolled 10 on 1d20 with a chance of 5%', self.send_message('?roll'))
patcher.stop()
def test_roll_takes_param(self):
def roll(arg1, arg2):
self.assertEqual(1, arg1)
self.assertEqual(6, arg2)
return 0
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=roll)
patcher.start()
self.send_message('?roll 1d6')
patcher.stop()
def test_roll_considers_modifier(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 0)
patcher.start()
result = self.send_message('?roll 1d6+1')
patcher.stop()
self.assertEqual('dummyuser rolled 1 on 1d6+1 with a chance of 16.67%', result)
def test_roll_considers_negative_modifier(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 0)
patcher.start()
result = self.send_message('?roll 1d6-1')
patcher.stop()
self.assertEqual('dummyuser rolled -1 on 1d6-1 with a chance of 16.67%', result)
def test_invalid_input_shames_user(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 10)
patcher.start()
self.assertEqual('dummyuser rolled ???... I don\'t know how to roll potato', self.send_message('?roll potato'))
patcher.stop()
def test_multiple_inputs(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 10)
patcher.start()
self.assertEqual('dummyuser rolled 10 on 1d20 with a chance of 5%, 10 on 1d20 with a chance of 5%, 10 on 1d20 with a chance of 5% for a total of 30', self.send_message('?roll 1d20 1d20 1d20'))
patcher.stop()
def test_big_numbers(self):
self.assertRegex(self.send_message('?roll 1000000d10000'),
'^dummyuser rolled [0-9]+ USING MATH on [0-9]+d[0-9]+ with a chance of [0-9.e-]+%ish$')
def test_bad_big_numbers(self):
self.assertEqual('dummyuser rolled ???... I don\'t know how to roll 11000d-10000', self.send_message('?roll 11000d-10000'))
def test_critical(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 20)
patcher.start()
result = self.send_message('?roll 2d20+1')
patcher.stop()
self.assertEqual('dummyuser rolled CRITICAL 41 on 2d20+1 with a chance of 0.25%', result)
def test_quiet(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 20)
patcher.start()
result = self.send_message('?roll 2d20+1q')
patcher.stop()
self.assertEqual('dummyuser rolled 41 on 2d20+1', result)
def test_reasonable_estimation(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 4)
patcher.start()
result = self.send_message('?roll 10d6')
patcher.stop()
self.assertEqual('dummyuser rolled 40 on 10d6 with a chance of 4.81%ish', result)
def test_slowest_possible(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 50)
patcher.start()
result = self.send_message('?roll 4d50')
patcher.stop()
self.assertEqual('dummyuser rolled CRITICAL 200 on 4d50 with a chance of 0.000016%', result)
def test_weird_result(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 100)
patcher.start()
result = self.send_message('?roll 100d100')
patcher.stop()
# I mean, this is wrong - the estimator gets bad values close to the extremes
self.assertEqual('dummyuser rolled CRITICAL 10000 on 100d100 with a chance of 0%ish', result)
def test_critical_fail(self):
patcher = mock.patch(target='plugins.roll.die_roll.randint', new=lambda *a, **k: 1)
patcher.start()
result = self.send_message('?roll 2d50')
patcher.stop()
self.assertEqual('dummyuser rolled CRITICAL FAIL 2 on 2d50 with a chance of 0.04%', result)
def test_zero_dice(self):
result = self.send_message('?roll 0d10')
self.assertEqual('dummyuser rolled CRITICAL 0 on 0d10 with a chance of 100%', result)
def test_damnit_jim(self):
result = self.send_message('?roll 999999999999999999999999999999999999999999d9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999')
self.assertEqual('Damn it Jim, stop trying to break things.', result)
```
#### File: Frisky/tests/test_stock.py
```python
import responses
from frisky.test import FriskyTestCase
from plugins.stock import StockPlugin
positive_change = '''
{
"chart":{
"result":[
{
"meta":{
"currency":"USD",
"previousClose":42.00
},
"indicators":{
"quote":[
{
"close":[
69.69
]
}
]
}
}
]
}
}
'''.strip()
negative_change = '''
{
"chart":{
"result":[
{
"meta":{
"currency":"USD",
"previousClose":42.00
},
"indicators":{
"quote":[
{
"close":[
13.37
]
}
]
}
}
]
}
}
'''.strip()
market_closed = '''
{
"chart":{
"result":[
{
"meta":{
"currency":"USD",
"previousClose":420.69
},
"indicators":{
"quote":[
{
"close":[
]
}
]
}
}
]
}
}
'''.strip()
class StockTestCase(FriskyTestCase):
URL = 'https://query1.finance.yahoo.com/v8/finance/chart/TEST'
def test_stock_market_closed(self):
with responses.RequestsMock() as rm:
rm.add('GET', StockTestCase.URL, body=market_closed)
reply = self.send_message('?stock TEST')
self.assertEqual(reply, 'TEST last closed at $420.69')
def test_positive_change(self):
with responses.RequestsMock() as rm:
rm.add('GET', StockTestCase.URL, body=positive_change)
reply = self.send_message('?stock TEST')
self.assertEqual(reply, ':chart_with_upwards_trend: TEST last traded at $69.69 ($27.69 65.93%)')
def test_negative_change(self):
with responses.RequestsMock() as rm:
rm.add('GET', StockTestCase.URL, body=negative_change)
reply = self.send_message('?stock TEST')
self.assertEqual(reply, ':chart_with_downwards_trend: TEST last traded at $13.37 (-$28.63 -68.17%)')
def test_no_args_returns_the_help_text(self):
reply = self.send_message('?stock')
self.assertEqual(reply, 'Usage: ?stock $SYMBOL')
def test_unknown_currency_throws_error(self):
plugin = StockPlugin()
with self.assertRaises(NotImplementedError):
plugin.format_money(420.69, 'ASDF')
```
#### File: Frisky/votes/models.py
```python
from django.db import models
class Vote(models.Model):
label = models.CharField(max_length=200, db_index=True, unique=True)
votes = models.IntegerField(default=0)
def __str__(self):
return f'Votes for {self.label}'
```
{
"source": "a0x8o/commai-env",
"score": 2
}
#### File: tasks/micro/associative.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from core.task import on_start, on_message, on_timeout, on_output_message, Task
import random
import logging
def generate_lookup(string_length):
source = list(range(2**string_length))
target = list(range(2**string_length))
random.shuffle(target)
return {bin(s)[2:].zfill(string_length):
bin(t)[2:].zfill(string_length) for s, t in zip(source, target)}
class EpisodicKeyValueAssociation(Task):
def __init__(self, string_length):
query_len = 3 * string_length + 4
answer_len = string_length + 1
super(EpisodicKeyValueAssociation, self).__init__(
max_time=query_len + answer_len + 1)
self.table = generate_lookup(string_length)
self.logger = logging.getLogger(__name__)
@on_start()
def give_instructions(self, event):
self.finished_talking = False
self.key = random.choice(list(self.table.keys()))
self.set_message('A{key}:{value} V{key}.'.format(key=self.key,
value=self.table[self.key]))
@on_output_message(r'\.')
def reward_at_end(self, event):
self.finished_talking = True
@on_message()
def discard_while_talking(self, event):
if not self.finished_talking:
self.ignore_last_char()
@on_message(r'\.')
def evaluate(self, event):
if not self.finished_talking:
return
if event.is_message(self.table[self.key] + '.'):
self.set_reward(1)
else:
self.set_reward(-1)
@on_timeout()
def handle_timeout(self, event):
self.set_reward(-1)
```
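For clarity, a small standalone sketch of what `generate_lookup` produces: a random bijection between all binary strings of a given length (the logic is reproduced here so the snippet can be run on its own):

```python
import random


def generate_lookup(string_length):
    # Random bijection on all binary strings of the given length
    # (same logic as in the task above).
    source = list(range(2 ** string_length))
    target = list(range(2 ** string_length))
    random.shuffle(target)
    return {bin(s)[2:].zfill(string_length): bin(t)[2:].zfill(string_length)
            for s, t in zip(source, target)}


table = generate_lookup(2)
print(table)   # e.g. {'00': '10', '01': '11', '10': '00', '11': '01'}
assert sorted(table.keys()) == sorted(table.values())
```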
{
"source": "a0x8o/felix",
"score": 2
}
#### File: calico/felix/datastore.py
```python
import logging
import os
import random
import sys
import gevent
from calico.datamodel_v1 import (
WloadEndpointId, ENDPOINT_STATUS_ERROR,
ENDPOINT_STATUS_DOWN, ENDPOINT_STATUS_UP,
TieredPolicyId, HostEndpointId, EndpointId)
from calico.felix import felixbackend_pb2
from calico.felix.actor import Actor, actor_message, TimedGreenlet
from calico.felix.futils import (
logging_exceptions, iso_utc_timestamp, IPV4,
IPV6, StatCounter
)
from calico.felix.protocol import *
from calico.monotonic import monotonic_time
from gevent.event import Event
from google.protobuf.descriptor import FieldDescriptor
_log = logging.getLogger(__name__)
RETRY_DELAY = 5
# Max number of events from driver process before we yield to another greenlet.
MAX_EVENTS_BEFORE_YIELD = 200
# Global diagnostic counters.
_stats = StatCounter("Etcd counters")
class DatastoreAPI(Actor):
"""
Our API to the datastore via the backend driver process.
"""
def __init__(self, config, pipe_from_parent, pipe_to_parent, hosts_ipset):
super(DatastoreAPI, self).__init__()
self._config = config
self.pipe_from_parent = pipe_from_parent
self.pipe_to_parent = pipe_to_parent
self.hosts_ipset = hosts_ipset
# Timestamp storing when the DatastoreAPI started. This info is needed
# in order to report uptime to etcd.
self._start_time = monotonic_time()
# The main etcd-watching greenlet.
self._reader = None
# One-way flag indicating we're being shut down.
self.killed = False
def _on_actor_started(self):
_log.info("%s starting worker threads", self)
reader, writer = self._connect_to_driver()
self.write_api = DatastoreWriter(self._config, writer)
self.write_api.start() # Sends the init message to the back-end.
self._reader = DatastoreReader(
self._config,
reader,
self.write_api,
self.hosts_ipset,
)
self._reader.link(self._on_worker_died)
self._reader.start()
def _connect_to_driver(self):
# Wrap the pipes in reader/writer objects that simplify using the
# protocol.
reader = MessageReader(self.pipe_from_parent)
writer = MessageWriter(self.pipe_to_parent)
return reader, writer
def driver_cmd(self, sck_filename):
if getattr(sys, "frozen", False):
# We're running under pyinstaller, where we share our
# executable with the etcd driver. Re-run this executable
# with the "driver" argument to invoke the etcd driver.
cmd = [sys.argv[0], "driver"]
else:
# Not running under pyinstaller, execute the etcd driver
# directly.
cmd = [sys.executable, "-m", "calico.etcddriver"]
# etcd driver takes the felix socket name as argument.
cmd = ["/home/gulfstream/go-work/src/github.com/tigera/"
"libcalico-go/bin/felix-backend"]
cmd += [sck_filename]
return cmd
@actor_message()
def load_config(self):
"""
Loads our config from etcd, should only be called once.
:return: an Event which is triggered when the config has been loaded.
"""
self._reader.load_config.set()
return self._reader.configured
@actor_message()
def start_watch(self, splitter):
"""
Starts watching etcd for changes. Implicitly loads the config
if it hasn't been loaded yet.
"""
assert self._reader.load_config.is_set(), (
"load_config() should be called before start_watch()."
)
self._reader.splitter = splitter
self._reader.begin_polling.set()
@actor_message()
def kill(self):
self.killed = True
self._reader.kill_watcher()
def _on_worker_died(self, watch_greenlet):
"""
Greenlet: spawned by the gevent Hub if the etcd watch loop ever
stops, kills the process.
"""
_log.critical("Worker greenlet died: %s; exiting.", watch_greenlet)
sys.exit(1)
class DatastoreReader(TimedGreenlet):
"""
Greenlet that read from the etcd driver over a socket.
* Does the initial handshake with the driver, sending it the init
message.
* Receives the pre-loaded config from the driver and uses that
to do Felix's one-off configuration.
* Sends the relevant config back to the driver.
* Processes the event stream from the driver, sending it on to
the splitter.
This class is similar to the EtcdWatcher class in that it uses
a PathDispatcher to fan out updates but it doesn't own an etcd
connection of its own.
"""
def __init__(self, config, msg_reader, datastore_writer, hosts_ipset):
super(DatastoreReader, self).__init__()
self._config = config
self.hosts_ipset = hosts_ipset
self._msg_reader = msg_reader
self._datastore_writer = datastore_writer
# Whether we've been in sync with etcd at some point.
self._been_in_sync = False
# Keep track of the config loaded from etcd so we can spot if it
# changes.
self.last_global_config = None
self.last_host_config = None
# Events triggered by the DatastoreAPI Actor to tell us to load the
# config and start polling. These are one-way flags.
self.load_config = Event()
self.begin_polling = Event()
# Event that we trigger once the config is loaded.
self.configured = Event()
# Polling state initialized at poll start time.
self.splitter = None
# Next-hop IP addresses of our hosts, if populated in etcd.
self.ipv4_by_hostname = {}
# Forces a resync after the current poll if set. Safe to set from
# another thread. Automatically reset to False after the resync is
# triggered.
self.resync_requested = False
# True if we've been shut down.
self.killed = False
# Stats.
self.read_count = 0
self.ip_upd_count = 0
self.ip_remove_count = 0
self.msgs_processed = 0
self.last_rate_log_time = monotonic_time()
self.last_ip_upd_log_time = monotonic_time()
self.last_ip_remove_log_time = monotonic_time()
@logging_exceptions
def _run(self):
# Don't do anything until we're told to load the config.
_log.info("Waiting for load_config event...")
self.load_config.wait()
_log.info("...load_config set. Starting driver read %s loop", self)
# Loop reading from the socket and processing messages.
self._loop_reading_from_driver()
def _loop_reading_from_driver(self):
while True:
try:
# Note: self._msg_reader.new_messages() returns iterator so
# whole for loop must be inside the try.
for msg_type, msg, seq_no in self._msg_reader.new_messages():
self._dispatch_msg_from_driver(msg_type, msg, seq_no)
except SocketClosed:
_log.critical("The driver process closed its socket, Felix "
"must exit.")
die_and_restart()
def _dispatch_msg_from_driver(self, msg_type, msg, seq_no):
_log.debug("Dispatching message (%s) of type: %s", seq_no, msg_type)
if msg_type not in {MSG_TYPE_CONFIG_UPDATE,
MSG_TYPE_INIT,
MSG_TYPE_IN_SYNC}:
if not self.begin_polling.is_set():
_log.info("Non-init message, waiting for begin_polling flag")
self.begin_polling.wait()
if msg_type == MSG_TYPE_IPSET_DELTA:
_stats.increment("IP set delta messages")
self._on_ipset_delta_msg_from_driver(msg)
elif msg_type == MSG_TYPE_IPSET_REMOVED:
_stats.increment("IP set removed messages")
self._on_ipset_removed_msg_from_driver(msg)
elif msg_type == MSG_TYPE_IPSET_UPDATE:
_stats.increment("IP set added messages")
self._on_ipset_update_msg_from_driver(msg)
elif msg_type == MSG_TYPE_WL_EP_UPDATE:
_stats.increment("Workload endpoint update messages")
self.on_wl_endpoint_update(msg)
elif msg_type == MSG_TYPE_WL_EP_REMOVE:
_stats.increment("Workload endpoint remove messages")
self.on_wl_endpoint_remove(msg)
elif msg_type == MSG_TYPE_HOST_EP_UPDATE:
_stats.increment("Host endpoint update messages")
self.on_host_ep_update(msg)
elif msg_type == MSG_TYPE_HOST_EP_REMOVE:
_stats.increment("Host endpoint remove messages")
self.on_host_ep_remove(msg)
elif msg_type == MSG_TYPE_HOST_METADATA_UPDATE:
_stats.increment("Host endpoint update messages")
self.on_host_meta_update(msg)
elif msg_type == MSG_TYPE_HOST_METADATA_REMOVE:
_stats.increment("Host endpoint remove messages")
self.on_host_meta_remove(msg)
elif msg_type == MSG_TYPE_IPAM_POOL_UPDATE:
_stats.increment("IPAM pool update messages")
self.on_ipam_pool_update(msg)
elif msg_type == MSG_TYPE_IPAM_POOL_REMOVE:
_stats.increment("IPAM pool remove messages")
self.on_ipam_pool_remove(msg)
elif msg_type == MSG_TYPE_POLICY_UPDATE:
_stats.increment("Policy update messages")
self.on_tiered_policy_update(msg)
elif msg_type == MSG_TYPE_POLICY_REMOVED:
_stats.increment("Policy removed messages")
self.on_tiered_policy_remove(msg)
elif msg_type == MSG_TYPE_PROFILE_UPDATE:
_stats.increment("Profile update messages")
self.on_prof_rules_update(msg)
elif msg_type == MSG_TYPE_PROFILE_REMOVED:
_stats.increment("Profile removed messages")
self.on_prof_rules_remove(msg)
elif msg_type == MSG_TYPE_CONFIG_UPDATE:
_stats.increment("Config loaded messages")
self._on_config_update(msg)
elif msg_type == MSG_TYPE_IN_SYNC:
_stats.increment("Status messages")
self._on_in_sync(msg)
else:
_log.error("Unexpected message %r %s", msg_type, msg)
raise RuntimeError("Unexpected message %s" % msg)
self.msgs_processed += 1
if self.msgs_processed % MAX_EVENTS_BEFORE_YIELD == 0:
# Yield to ensure that other actors make progress. (gevent only
# yields for us if the socket would block.) The sleep must be
# non-zero to work around gevent issue where we could be
# immediately rescheduled.
gevent.sleep(0.000001)
def _on_config_update(self, msg):
"""
Called when we receive a config loaded message from the driver.
This message is expected once per resync, when the config is
pre-loaded by the driver.
On the first call, responds to the driver synchronously with a
config response.
If the config has changed since a previous call, triggers Felix
to die.
"""
global_config = dict(msg.config)
host_config = dict(msg.config)
_log.info("Config loaded by driver: %s", msg.config)
if self.configured.is_set():
# We've already been configured. We don't yet support
# dynamic config update so instead we check if the config
# has changed and die if it has.
_log.info("Checking configuration for changes...")
if (host_config != self.last_host_config or
global_config != self.last_global_config):
_log.warning("Felix configuration has changed, "
"felix must restart.")
_log.info("Old host config: %s", self.last_host_config)
_log.info("New host config: %s", host_config)
_log.info("Old global config: %s",
self.last_global_config)
_log.info("New global config: %s", global_config)
die_and_restart()
else:
# First time loading the config. Report it to the config
# object. Take copies because report_etcd_config is
# destructive.
self.last_host_config = host_config.copy()
self.last_global_config = global_config.copy()
self._config.update_from(msg.config)
_log.info("Config loaded: %s", self._config.__dict__)
self.configured.set()
self._datastore_writer.on_config_resolved(async=True)
_log.info("Config loaded by driver: %s", msg.config)
def _on_in_sync(self, msg):
"""
Called when we receive a status update from the driver.
The driver sends us status messages whenever its status changes.
It moves through these states:
(1) wait-for-ready (waiting for the global ready flag to become set)
(2) resync (resyncing with etcd, processing a snapshot and any
concurrent events)
(3) in-sync (snapshot processing complete, now processing only events
from etcd)
If the driver falls out of sync with etcd then it will start again
from (1).
If the status is in-sync, triggers the relevant processing.
"""
_log.info("Datastore now in sync")
# We're now in sync, tell the Actors that need to do start-of-day
# cleanup.
self.begin_polling.wait() # Make sure splitter is set.
self._been_in_sync = True
self.splitter.on_datamodel_in_sync()
self._update_hosts_ipset()
def _on_ipset_update_msg_from_driver(self, msg):
self.splitter.on_ipset_update(msg.id,
msg.members or [])
def _on_ipset_removed_msg_from_driver(self, msg):
self.splitter.on_ipset_removed(msg.id)
def _on_ipset_delta_msg_from_driver(self, msg):
_log.debug("IP set delta updates: %s", msg)
# Output some very coarse stats.
self.ip_upd_count += 1
if self.ip_upd_count % 1000 == 0:
now = monotonic_time()
delta = now - self.last_ip_upd_log_time
_log.info("Processed %s IP updates from driver "
"%.1f/s", self.ip_upd_count, 1000.0 / delta)
self.last_ip_upd_log_time = now
self.splitter.on_ipset_delta_update(msg.id,
msg.added_members or [],
msg.removed_members or [])
def on_wl_endpoint_update(self, msg):
"""Handler for endpoint updates, passes the update to the splitter.
:param msg felixbackend_pb2.WorkloadEndpointUpdate"""
hostname = self._config.HOSTNAME
orchestrator = msg.id.orchestrator_id
workload_id = msg.id.workload_id
endpoint_id = msg.id.endpoint_id
combined_id = WloadEndpointId(hostname, orchestrator, workload_id,
endpoint_id)
_log.debug("Endpoint %s updated", combined_id)
_stats.increment("Endpoint created/updated")
endpoint = {
"state": msg.endpoint.state,
"name": msg.endpoint.name,
"mac": msg.endpoint.mac or None,
"profile_ids": msg.endpoint.profile_ids,
"ipv4_nets": msg.endpoint.ipv4_nets,
"ipv6_nets": msg.endpoint.ipv6_nets,
"tiers": convert_pb_tiers(msg.endpoint.tiers),
"ipv4_nat": convert_nats(msg.endpoint.ipv4_nat),
"ipv6_nat": convert_nats(msg.endpoint.ipv6_nat),
}
self.splitter.on_endpoint_update(combined_id, endpoint)
def on_wl_endpoint_remove(self, msg):
"""Handler for endpoint removal, passes the removal to the splitter.
:param msg felixbackend_pb2.WorkloadEndpointUpdate"""
hostname = self._config.HOSTNAME
orchestrator = msg.id.orchestrator_id
workload_id = msg.id.workload_id
endpoint_id = msg.id.endpoint_id
combined_id = WloadEndpointId(hostname, orchestrator, workload_id,
endpoint_id)
_log.debug("Endpoint %s removed", combined_id)
_stats.increment("Endpoint removed")
self.splitter.on_endpoint_update(combined_id, None)
def on_host_ep_update(self, msg):
"""Handler for create/update of host endpoint."""
hostname = self._config.HOSTNAME
endpoint_id = msg.id.endpoint_id
combined_id = HostEndpointId(hostname, endpoint_id)
_log.debug("Host endpoint %s updated", combined_id)
_stats.increment("Host endpoint created/updated")
endpoint = {
"name": msg.endpoint.name or None,
"profile_ids": msg.endpoint.profile_ids,
"expected_ipv4_addrs": msg.endpoint.expected_ipv4_addrs,
"expected_ipv6_addrs": msg.endpoint.expected_ipv6_addrs,
"tiers": convert_pb_tiers(msg.endpoint.tiers),
}
self.splitter.on_host_ep_update(combined_id, endpoint)
def on_host_ep_remove(self, msg):
"""Handler for removal of a host endpoint."""
hostname = self._config.HOSTNAME
endpoint_id = msg.id.endpoint_id
combined_id = HostEndpointId(hostname, endpoint_id)
_log.debug("Host endpoint %s removed", combined_id)
_stats.increment("Host endpoint removed")
self.splitter.on_host_ep_update(combined_id, None)
def on_prof_rules_update(self, msg):
"""Handler for rules updates, passes the update to the splitter."""
profile_id = msg.id.name
_log.debug("Rules for %s set", profile_id)
_stats.increment("Rules created/updated")
profile_id = intern(profile_id.encode("utf8"))
rules = {
"inbound_rules": convert_pb_rules(msg.profile.inbound_rules),
"outbound_rules": convert_pb_rules(msg.profile.outbound_rules),
}
self.splitter.on_rules_update(profile_id, rules)
def on_prof_rules_remove(self, msg):
"""Handler for rules removal, passes the removal to the splitter."""
profile_id = msg.id.name
_log.debug("Rules for %s removed", profile_id)
_stats.increment("Rules created/updated")
profile_id = intern(profile_id.encode("utf8"))
self.splitter.on_rules_update(profile_id, None)
def on_tiered_policy_update(self, msg):
_log.debug("Rules for %s/%s set", msg.id.tier, msg.id.name)
_stats.increment("Tiered rules created/updated")
policy_id = TieredPolicyId(msg.id.tier, msg.id.name)
rules = {
"inbound_rules": convert_pb_rules(msg.policy.inbound_rules),
"outbound_rules": convert_pb_rules(msg.policy.outbound_rules),
}
self.splitter.on_rules_update(policy_id, rules)
def on_tiered_policy_remove(self, msg):
_log.debug("Rules for %s/%s removed", msg.id.tier, msg.id.name)
_stats.increment("Tiered rules created/updated")
policy_id = TieredPolicyId(msg.id.tier, msg.id.name)
self.splitter.on_rules_update(policy_id, None)
def on_host_meta_update(self, msg):
if not self._config.IP_IN_IP_ENABLED:
_log.debug("Ignoring update to host IP because IP-in-IP disabled")
return
_stats.increment("Host IP created/updated")
self.ipv4_by_hostname[msg.hostname] = msg.ipv4_addr
self._update_hosts_ipset()
def on_host_meta_remove(self, msg):
if not self._config.IP_IN_IP_ENABLED:
_log.debug("Ignoring update to host IP because IP-in-IP is "
"disabled")
return
_stats.increment("Host IP removed")
if self.ipv4_by_hostname.pop(msg.hostname, None):
self._update_hosts_ipset()
def _update_hosts_ipset(self):
if not self._been_in_sync:
_log.debug("Deferring update to hosts ipset until we're in-sync")
return
self.hosts_ipset.replace_members(
frozenset(self.ipv4_by_hostname.values()),
async=True
)
def on_ipam_pool_update(self, msg):
_stats.increment("IPAM pool created/updated")
pool = {
"cidr": msg.pool.cidr,
"masquerade": msg.pool.masquerade,
}
self.splitter.on_ipam_pool_updated(msg.id, pool)
def on_ipam_pool_remove(self, msg):
_stats.increment("IPAM pool deleted")
self.splitter.on_ipam_pool_updated(msg.id, None)
def kill_watcher(self):
self.killed = True
class DatastoreWriter(Actor):
"""
Actor that manages and rate-limits the queue of status reports to
etcd.
"""
def __init__(self, config, message_writer):
super(DatastoreWriter, self).__init__()
self._config = config
self._start_time = monotonic_time()
self._writer = message_writer
self._endpoint_status = {IPV4: {}, IPV6: {}}
self.config_resolved = False
self._dirty_endpoints = set()
self._reporting_allowed = True
self._status_reporting_greenlet = None
@logging_exceptions
def _periodically_report_status(self):
"""
Greenlet: periodically writes Felix's status into the datastore.
:return: Does not return, unless reporting disabled.
"""
interval = self._config.REPORTING_INTERVAL_SECS
_log.info("Reporting Felix status at interval: %s", interval)
# Do a short initial sleep before we report in. This ensures that
# we're stably up before we check in.
jitter = random.random() * 0.1 * interval
sleep_time = interval/2.0 + jitter
_log.info("Delay before initial status report: %.1f", sleep_time)
gevent.sleep(sleep_time)
while True:
self.update_felix_status(async=True)
# Jitter by 10% of interval.
jitter = random.random() * 0.1 * interval
sleep_time = interval + jitter
gevent.sleep(sleep_time)
@actor_message()
def on_config_resolved(self):
# Config now fully resolved, inform the driver.
self.config_resolved = True
if self._config.REPORTING_INTERVAL_SECS > 0:
self._status_reporting_greenlet = TimedGreenlet(
self._periodically_report_status
)
self._status_reporting_greenlet.link_exception(
self._on_worker_died
)
self._status_reporting_greenlet.start()
@actor_message()
def on_endpoint_status_changed(self, endpoint_id, ip_type, status):
assert isinstance(endpoint_id, EndpointId)
if status is not None:
_stats.increment("Endpoint status updated")
self._endpoint_status[ip_type][endpoint_id] = status
else:
_stats.increment("Endpoint status deleted")
self._endpoint_status[ip_type].pop(endpoint_id, None)
self._mark_endpoint_dirty(endpoint_id)
@actor_message()
def update_felix_status(self):
"""Sends Felix's status to the backend driver."""
time_formatted = iso_utc_timestamp()
uptime = monotonic_time() - self._start_time
envelope = felixbackend_pb2.FromDataplane()
payload = envelope.process_status_update
payload.iso_timestamp = time_formatted
payload.uptime = uptime
self._writer.send_message(envelope)
def _mark_endpoint_dirty(self, endpoint_id):
assert isinstance(endpoint_id, EndpointId)
_log.debug("Marking endpoint %s dirty", endpoint_id)
self._dirty_endpoints.add(endpoint_id)
def _finish_msg_batch(self, batch, results):
if not self.config_resolved:
_log.debug("Still waiting for config, skipping endpoint status "
"updates")
return
if not self._config.REPORT_ENDPOINT_STATUS:
_log.debug("Endpoint reporting disabled, clearing any state.")
self._endpoint_status[IPV4].clear()
self._endpoint_status[IPV6].clear()
self._dirty_endpoints.clear()
return
for ep_id in self._dirty_endpoints:
status_v4 = self._endpoint_status[IPV4].get(ep_id)
status_v6 = self._endpoint_status[IPV6].get(ep_id)
status = combine_statuses(status_v4, status_v6)
self._write_endpoint_status(ep_id, status)
self._dirty_endpoints.clear()
def _write_endpoint_status(self, ep_id, status):
_stats.increment("Per-port status report writes")
envelope = felixbackend_pb2.FromDataplane()
if isinstance(ep_id, WloadEndpointId):
if status is not None:
payload = envelope.workload_endpoint_status_update
payload.id.orchestrator_id = ep_id.orchestrator
payload.id.workload_id = ep_id.workload
payload.id.endpoint_id = ep_id.endpoint
payload.status.status = status["status"]
else:
payload = envelope.workload_endpoint_status_remove
payload.id.orchestrator_id = ep_id.orchestrator
payload.id.workload_id = ep_id.workload
payload.id.endpoint_id = ep_id.endpoint
else:
if status is not None:
payload = envelope.host_endpoint_status_update
payload.id.endpoint_id = ep_id.endpoint
payload.status.status = status["status"]
else:
payload = envelope.host_endpoint_status_remove
payload.id.endpoint_id = ep_id.endpoint
self._writer.send_message(envelope)
def _on_worker_died(self, watch_greenlet):
"""
Greenlet: spawned by the gevent Hub if the worker ever stops, kills
the process.
"""
_log.critical("Worker greenlet died: %s; exiting.",
watch_greenlet)
sys.exit(1)
def combine_statuses(status_a, status_b):
"""
Combines a pair of status reports for the same interface.
If one status is None, the other is returned. Otherwise, the worst
status wins.
"""
if not status_a:
return status_b
if not status_b:
return status_a
a = status_a["status"]
b = status_b["status"]
if a == ENDPOINT_STATUS_ERROR or b == ENDPOINT_STATUS_ERROR:
return {"status": ENDPOINT_STATUS_ERROR}
elif a == ENDPOINT_STATUS_DOWN or b == ENDPOINT_STATUS_DOWN:
return {"status": ENDPOINT_STATUS_DOWN}
else:
return {"status": ENDPOINT_STATUS_UP}
def convert_nats(nats):
dict_nats = []
for nat in nats:
d_nat = {"ext_ip": nat.ext_ip, "int_ip": nat.int_ip}
dict_nats.append(d_nat)
return dict_nats
def convert_pb_tiers(tiers):
dict_tiers = []
for pb_tier in tiers:
d_tier = {"name": pb_tier.name, "policies": pb_tier.policies}
dict_tiers.append(d_tier)
return dict_tiers
def convert_pb_rules(pb_rules):
dict_rules = []
for pb_rule in pb_rules:
_log.debug("Converting protobuf rule: %r type: %s",
pb_rule, pb_rule.__class__)
d_rule = {}
for fd, value in pb_rule.ListFields():
if value is None:
continue
if fd.type == FieldDescriptor.TYPE_STRING and value == "":
continue
if fd.type in (FieldDescriptor.TYPE_INT32,
FieldDescriptor.TYPE_INT64) and value == 0:
continue
_log.debug("Field %s = %s", fd.name, value)
negated = fd.name.startswith("not_")
stem = fd.name if not negated else fd.name[4:]
dict_name = "!" + stem if negated else stem
if stem.endswith("_ports"):
value = convert_pb_ports(value)
elif stem.endswith("protocol"):
value = convert_pb_protocol(value)
elif stem.endswith("ip_set_ids"):
value = list(value)
if stem == "icmp_type_code":
# Special case: ICMP is represented by an object, unpack it.
d_rule[("!" if negated else "") + "icmp_type"] = value.type
d_rule[("!" if negated else "") + "icmp_code"] = value.code
else:
d_rule[dict_name] = value
dict_rules.append(d_rule)
return dict_rules
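# Example (sketch): a protobuf rule field named "not_src_net" is emitted
# under the dict key "!src_net", "*_ports" fields are converted via
# convert_pb_ports(), and an "icmp_type_code" object is unpacked into
# separate "icmp_type"/"icmp_code" entries.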
def convert_pb_ports(pb_ports):
_log.debug("Converting ports: %s", pb_ports)
return map(convert_port, pb_ports)
def convert_port(pb_port):
if pb_port.first == pb_port.last:
return pb_port.first
else:
return "%s:%s" % (pb_port.first, pb_port.last)
def convert_pb_protocol(pb_proto):
if pb_proto.HasField("number"):
return pb_proto.number
else:
return pb_proto.name
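# Example (sketch): convert_pb_ports() maps each protobuf port, so a single
# port (first == last == 80) renders as 80 and a range (first=10, last=20)
# renders as "10:20"; convert_pb_protocol() returns the numeric protocol if
# the "number" field is set, otherwise the protocol name.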
def die_and_restart():
# Sleep so that we can't die more than 5 times in 10s even if someone is
# churning the config. This prevents our upstart/systemd jobs from giving
# up on us.
gevent.sleep(2)
# Use a failure code to tell systemd that we expect to be restarted. We
# use os._exit() because it is bullet-proof.
os._exit(1)
```
#### File: calico/felix/frules.py
```python
import logging
import time
import netaddr
from calico.felix import devices
from calico.felix import futils
from calico.felix.futils import FailedSystemCall
from calico.felix.ipsets import HOSTS_IPSET_V4
_log = logging.getLogger(__name__)
FELIX_PREFIX = "felix-"
# Maximum number of port entries in a "multiport" match rule. Ranges count for
# 2 entries.
MAX_MULTIPORT_ENTRIES = 15
# Name of the global, stateless IP-in-IP device name.
IP_IN_IP_DEV_NAME = "tunl0"
# Rule to catch packets that are being sent down the IPIP tunnel from an
# incorrect local IP address of the host. This happens if:
#
# - the user explicitly binds their socket to the wrong source IP accidentally
# - the user sends traffic to, for example, a Kubernetes service IP, which is
# implemented via NAT instead of routing, leading the kernel to choose the
# wrong source IP.
#
# We NAT the source of the packet to use the tunnel IP. We assume that
# non-local IPs have been correctly routed. Since Calico-assigned IPs are
# non-local (because they're down a veth), they won't get caught by the rule.
# Other remote sources will only reach the tunnel if they're being NATted
# already (for example, a Kubernetes "NodePort"). The kernel will then
# choose the correct source on its own.
POSTROUTING_LOCAL_NAT_FRAGMENT = (
"POSTROUTING "
# Only match if the packet is going out via the tunnel.
"--out-interface %s "
# Match packets that don't have the correct source address. This matches
# local addresses (i.e. ones assigned to this host) limiting the match to
# the output interface (which we matched above as the tunnel). Avoiding
# embedding the IP address lets us use a static rule, which is easier to
# manage.
"-m addrtype ! --src-type LOCAL --limit-iface-out "
# Only match if the IP is also some local IP on the box. This prevents
# us from matching packets from workloads, which are remote as far as the
# routing table is concerned.
"-m addrtype --src-type LOCAL "
# NAT them to use the source IP of the tunnel. Using MASQUERADE means
# the kernel chooses the source automatically.
"-j MASQUERADE" % IP_IN_IP_DEV_NAME
)
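# With IP_IN_IP_DEV_NAME = "tunl0", the fragment above renders (sketch,
# shown wrapped here for readability) as:
#   POSTROUTING --out-interface tunl0
#     -m addrtype ! --src-type LOCAL --limit-iface-out
#     -m addrtype --src-type LOCAL -j MASQUERADE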
# Chain names
# Dispatch chains to and from workload endpoints.
CHAIN_TO_ENDPOINT = FELIX_PREFIX + "TO-ENDPOINT"
CHAIN_FROM_ENDPOINT = FELIX_PREFIX + "FROM-ENDPOINT"
CHAIN_TO_LEAF = FELIX_PREFIX + "TO-EP-PFX"
CHAIN_FROM_LEAF = FELIX_PREFIX + "FROM-EP-PFX"
WORKLOAD_DISPATCH_CHAINS = {
"to_root": CHAIN_TO_ENDPOINT,
"from_root": CHAIN_FROM_ENDPOINT,
"to_leaf": CHAIN_TO_LEAF,
"from_leaf": CHAIN_FROM_LEAF,
}
# Ditto for host endpoints.
CHAIN_TO_IFACE = FELIX_PREFIX + "TO-HOST-IF"
CHAIN_FROM_IFACE = FELIX_PREFIX + "FROM-HOST-IF"
CHAIN_TO_IFACE_LEAF = FELIX_PREFIX + "TO-IF-PFX"
CHAIN_FROM_IFACE_LEAF = FELIX_PREFIX + "FROM-IF-PFX"
HOST_DISPATCH_CHAINS = {
"to_root": CHAIN_TO_IFACE,
"from_root": CHAIN_FROM_IFACE,
"to_leaf": CHAIN_TO_IFACE_LEAF,
"from_leaf": CHAIN_FROM_IFACE_LEAF,
}
# Failsafe whitelist chains.
CHAIN_FAILSAFE_IN = FELIX_PREFIX + "FAILSAFE-IN"
CHAIN_FAILSAFE_OUT = FELIX_PREFIX + "FAILSAFE-OUT"
# Per-endpoint/interface chain prefixes.
CHAIN_TO_PREFIX = FELIX_PREFIX + "to-"
CHAIN_FROM_PREFIX = FELIX_PREFIX + "from-"
# Top-level felix chains.
CHAIN_PREROUTING = FELIX_PREFIX + "PREROUTING"
CHAIN_POSTROUTING = FELIX_PREFIX + "POSTROUTING"
CHAIN_INPUT = FELIX_PREFIX + "INPUT"
CHAIN_OUTPUT = FELIX_PREFIX + "OUTPUT"
CHAIN_FORWARD = FELIX_PREFIX + "FORWARD"
CHAIN_FIP_DNAT = FELIX_PREFIX + 'FIP-DNAT'
CHAIN_FIP_SNAT = FELIX_PREFIX + 'FIP-SNAT'
def load_nf_conntrack():
"""
Try to force the nf_conntrack_netlink kernel module to be loaded.
"""
_log.info("Running conntrack command to force load of "
"nf_conntrack_netlink module.")
try:
# Run a conntrack command to trigger it to load the kernel module if
# it's not already compiled in. We list rules with a randomly-chosen
# link local address. That makes it very unlikely that we generate
# any wasteful output. We used to use "-S" (show stats) here but it
# seems to be bugged on some platforms, generating an error.
futils.check_call(["conntrack", "-L", "-s", "169.254.45.169"])
except FailedSystemCall:
_log.exception("Failed to execute conntrack command to force load of "
"nf_conntrack_netlink module. conntrack commands may "
"fail later.")
def install_global_rules(config, filter_updater, nat_updater, ip_version,
raw_updater=None):
"""
Set up global iptables rules. These are rules that do not change with
endpoint, and are expected never to change (such as the rules that send all
traffic through the top level Felix chains).
    This method therefore:
- ensures that all the required global tables are present;
- applies any changes required.
"""
# If enabled, create the IP-in-IP device, but only for IPv4
if ip_version == 4:
if config.IP_IN_IP_ENABLED:
_log.info("IP-in-IP enabled, ensuring device exists.")
try:
_configure_ipip_device(config)
except FailedSystemCall:
# We've seen this fail occasionally if the kernel is
# concurrently starting the tunl0 device. Retry.
_log.exception("Failed to configure IPIP device, retrying...")
time.sleep(1)
_configure_ipip_device(config)
if config.IP_IN_IP_ENABLED and config.IP_IN_IP_ADDR:
# Add a rule to catch packets originated by this host that are
# going down the tunnel with the wrong source address. NAT them
# to use the address of the tunnel device instead. See comment
# on the constant for more details.
_log.info("IPIP enabled and tunnel address set: inserting "
"MASQUERADE rule to ensure tunnelled packets have "
"correct source.")
nat_updater.ensure_rule_inserted(POSTROUTING_LOCAL_NAT_FRAGMENT,
async=False)
else:
# Clean up the rule that we insert above if IPIP is enabled.
_log.info("IPIP disabled or no tunnel address set: removing "
"MASQUERADE rule.")
nat_updater.ensure_rule_removed(POSTROUTING_LOCAL_NAT_FRAGMENT,
async=False)
# Ensure that Calico-controlled IPv6 hosts cannot spoof their IP addresses.
# (For IPv4, this is controlled by a per-interface sysctl.)
iptables_generator = config.plugins["iptables_generator"]
if raw_updater:
raw_prerouting_chain, raw_prerouting_deps = (
iptables_generator.raw_rpfilter_failed_chain(ip_version=ip_version)
)
raw_updater.rewrite_chains({CHAIN_PREROUTING: raw_prerouting_chain},
{CHAIN_PREROUTING: raw_prerouting_deps},
async=False)
for iface_prefix in config.IFACE_PREFIX:
# The interface matching string; for example,
# if interfaces start "tap" then this string is "tap+".
iface_match = iface_prefix + '+'
raw_updater.ensure_rule_inserted(
"PREROUTING --in-interface %s --match rpfilter --invert "
"--jump %s" %
(iface_match, CHAIN_PREROUTING),
async=False)
    # Both the IPv4 and IPv6 nat tables need felix-PREROUTING,
    # felix-POSTROUTING and felix-OUTPUT, along with the dependent
    # DNAT and SNAT chains required for NAT/floating IP support.
prerouting_chain, prerouting_deps = (
iptables_generator.nat_prerouting_chain(ip_version=ip_version)
)
postrouting_chain, postrouting_deps = (
iptables_generator.nat_postrouting_chain(ip_version=ip_version)
)
output_chain, output_deps = (
iptables_generator.nat_output_chain(ip_version=ip_version)
)
nat_updater.rewrite_chains({CHAIN_PREROUTING: prerouting_chain,
CHAIN_POSTROUTING: postrouting_chain,
CHAIN_OUTPUT: output_chain,
CHAIN_FIP_DNAT: [],
CHAIN_FIP_SNAT: []},
{CHAIN_PREROUTING: prerouting_deps,
CHAIN_POSTROUTING: postrouting_deps,
CHAIN_OUTPUT: output_deps},
async=False)
nat_updater.ensure_rule_inserted(
"PREROUTING --jump %s" % CHAIN_PREROUTING, async=False)
nat_updater.ensure_rule_inserted(
"POSTROUTING --jump %s" % CHAIN_POSTROUTING, async=False)
nat_updater.ensure_rule_inserted(
"OUTPUT --jump %s" % CHAIN_OUTPUT, async=False)
# Now the filter table. This needs to have felix-FORWARD and felix-INPUT
# chains, which we must create before adding any rules that send to them.
if ip_version == 4 and config.IP_IN_IP_ENABLED:
hosts_set_name = HOSTS_IPSET_V4.set_name
HOSTS_IPSET_V4.ensure_exists()
else:
hosts_set_name = None
input_chain, input_deps = (
iptables_generator.filter_input_chain(ip_version, hosts_set_name)
)
output_chain, output_deps = (
iptables_generator.filter_output_chain(ip_version)
)
forward_chain, forward_deps = (
iptables_generator.filter_forward_chain(ip_version)
)
failsafe_in_chain, failsafe_in_deps = (
iptables_generator.failsafe_in_chain()
)
failsafe_out_chain, failsafe_out_deps = (
iptables_generator.failsafe_out_chain()
)
filter_updater.rewrite_chains(
{
CHAIN_FORWARD: forward_chain,
CHAIN_INPUT: input_chain,
CHAIN_OUTPUT: output_chain,
CHAIN_FAILSAFE_IN: failsafe_in_chain,
CHAIN_FAILSAFE_OUT: failsafe_out_chain,
},
{
CHAIN_FORWARD: forward_deps,
CHAIN_INPUT: input_deps,
CHAIN_OUTPUT: output_deps,
CHAIN_FAILSAFE_IN: failsafe_in_deps,
CHAIN_FAILSAFE_OUT: failsafe_out_deps,
},
async=False)
filter_updater.ensure_rule_inserted(
"INPUT --jump %s" % CHAIN_INPUT,
async=False)
filter_updater.ensure_rule_inserted(
"OUTPUT --jump %s" % CHAIN_OUTPUT,
async=False)
filter_updater.ensure_rule_inserted(
"FORWARD --jump %s" % CHAIN_FORWARD,
async=False)
def _configure_ipip_device(config):
"""Creates and enables the IPIP tunnel device.
:raises FailedSystemCall on failure.
"""
if not devices.interface_exists(IP_IN_IP_DEV_NAME):
# Make sure the IP-in-IP device exists; since we use the global
# device, this command actually creates it as a side-effect of
# initialising the kernel module rather than explicitly creating
# it.
_log.info("Tunnel device didn't exist; creating.")
futils.check_call(["ip", "tunnel", "add", IP_IN_IP_DEV_NAME,
"mode", "ipip"])
futils.check_call(["ip", "link", "set", IP_IN_IP_DEV_NAME, "mtu",
str(config.IP_IN_IP_MTU)])
if not devices.interface_up(IP_IN_IP_DEV_NAME):
_log.info("Tunnel device wasn't up; enabling.")
futils.check_call(["ip", "link", "set", IP_IN_IP_DEV_NAME, "up"])
# Allow an IP address to be added to the tunnel. This is useful to
# allow the host to have an IP on a private IPIP network so that it can
# originate traffic and have it routed correctly.
_log.info("Setting IPIP device IP to %s", config.IP_IN_IP_ADDR)
tunnel_addrs = [netaddr.IPAddress(config.IP_IN_IP_ADDR)] if config.IP_IN_IP_ADDR else []
devices.set_interface_ips(futils.IPV4, IP_IN_IP_DEV_NAME,
set(tunnel_addrs))
_log.info("Configured IPIP device.")
def interface_to_chain_suffix(config, iface_name):
"""
Extracts the suffix from a given interface name, uniquely shortening it
to 16 characters if necessary.
:param iface_name: The interface name
:returns string: the suffix (shortened if necessary)
"""
for prefix in sorted(config.IFACE_PREFIX, reverse=True):
if iface_name.startswith(prefix):
iface_name = iface_name[len(prefix):]
break
iface_name = futils.uniquely_shorten(iface_name, 16)
return iface_name
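# Example (sketch): with config.IFACE_PREFIX = ["tap"], an interface named
# "tap1234" maps to the chain suffix "1234"; suffixes longer than 16
# characters are uniquely shortened via futils.uniquely_shorten().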
```
#### File: calico/test/test_common.py
```python
import copy
import re
from collections import namedtuple
import logging
import mock
import sys
from nose.tools import assert_raises
from unittest2 import skip
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import calico.common as common
from calico.common import ValidationFailed
from calico.datamodel_v1 import WloadEndpointId, TieredPolicyId, HostEndpointId
Config = namedtuple("Config", ["IFACE_PREFIX", "HOSTNAME"])
# Logger
_log = logging.getLogger(__name__)
MISSING = object()
class TestCommon(unittest.TestCase):
def setUp(self):
self.m_config = mock.Mock()
self.m_config.IFACE_PREFIX = ["tap"]
self.m_config.HOSTNAME = "localhost"
def tearDown(self):
pass
def test_validate_port(self):
self.assertFalse(common.validate_port(-1))
self.assertFalse(common.validate_port(0))
self.assertTrue(common.validate_port(3))
self.assertTrue(common.validate_port(3))
self.assertTrue(common.validate_port(65535))
self.assertFalse(common.validate_port(65536))
self.assertFalse(common.validate_port("-1"))
self.assertFalse(common.validate_port("0"))
self.assertTrue(common.validate_port("3"))
self.assertTrue(common.validate_port("3"))
self.assertTrue(common.validate_port("65535"))
self.assertFalse(common.validate_port("65536"))
self.assertFalse(common.validate_port("1-10"))
self.assertFalse(common.validate_port("blah"))
def test_validate_rules_canon(self):
rules = {
"inbound_rules": [
{"protocol": "tcp", "ip_version": 4, "src_net": "10/8",
"dst_net": "11.0/16", "src_ports": [10, "11:12"],
"action": "allow",
"log_prefix": "foo!@#$012345678901234567890123456789"},
{"action": "log"},
{"protocol": "tcp", "src_net": None},
],
"outbound_rules": [
{"protocol": "tcp", "ip_version": 6,
"src_net": "2001:0::1/128", "dst_net": "2001:0::/64",
"icmp_type": 7, "icmp_code": 10,
"action": "deny"}
],
}
common.validate_profile("profile_id", rules)
# Check IPs get made canonical.
self.assertEqual(rules, {
"inbound_rules": [
{"protocol": "tcp", "ip_version": 4, "src_net": "10.0.0.0/8",
"dst_net": "192.168.3.11/16", "src_ports": [10, "11:12"],
"action": "allow",
"log_prefix": "foo____01234567890123456789"},
{"action": "log"},
{"protocol": "tcp"},
],
"outbound_rules": [
{"protocol": "tcp", "ip_version": 6,
"src_net": "2001::1/128", "dst_net": "2001::/64",
"icmp_type": 7, "icmp_code": 10,
"action": "deny"}
],
})
def test_validate_ip_addr(self):
self.assertTrue(common.validate_ip_addr("1.2.3.4", 4))
self.assertFalse(common.validate_ip_addr("1.2.3.4.5", 4))
self.assertFalse(common.validate_ip_addr("1.2.3.4/32", 4))
self.assertTrue(common.validate_ip_addr("1.2.3", 4))
self.assertFalse(common.validate_ip_addr("bloop", 4))
self.assertFalse(common.validate_ip_addr("::", 4))
self.assertFalse(common.validate_ip_addr("2001::abc", 4))
self.assertFalse(common.validate_ip_addr("2001::a/64", 4))
self.assertFalse(common.validate_ip_addr("1.2.3.4", 6))
self.assertFalse(common.validate_ip_addr("1.2.3.4.5", 6))
self.assertFalse(common.validate_ip_addr("1.2.3.4/32", 6))
self.assertFalse(common.validate_ip_addr("1.2.3", 6))
self.assertFalse(common.validate_ip_addr("bloop", 6))
self.assertTrue(common.validate_ip_addr("::", 6))
self.assertTrue(common.validate_ip_addr("2001::abc", 6))
self.assertFalse(common.validate_ip_addr("2001::a/64", 6))
self.assertTrue(common.validate_ip_addr("1.2.3.4", None))
self.assertFalse(common.validate_ip_addr("1.2.3.4.5", None))
self.assertFalse(common.validate_ip_addr("1.2.3.4/32", None))
self.assertTrue(common.validate_ip_addr("1.2.3", None))
self.assertFalse(common.validate_ip_addr("bloop", None))
self.assertTrue(common.validate_ip_addr("::", None))
self.assertTrue(common.validate_ip_addr("2001::abc", None))
self.assertFalse(common.validate_ip_addr("2001::a/64", None))
self.assertFalse(common.validate_ip_addr(None, None))
def test_validate_cidr(self):
self.assertTrue(common.validate_cidr("1.2.3.4", 4))
self.assertFalse(common.validate_cidr("1.2.3.4.5", 4))
self.assertTrue(common.validate_cidr("1.2.3.4/32", 4))
self.assertTrue(common.validate_cidr("1.2.3", 4))
self.assertFalse(common.validate_cidr("bloop", 4))
self.assertFalse(common.validate_cidr("::", 4))
self.assertFalse(common.validate_cidr("2001::abc", 4))
self.assertFalse(common.validate_cidr("2001::a/64", 4))
self.assertFalse(common.validate_cidr("1.2.3.4", 6))
self.assertFalse(common.validate_cidr("1.2.3.4.5", 6))
self.assertFalse(common.validate_cidr("1.2.3.4/32", 6))
self.assertFalse(common.validate_cidr("1.2.3", 6))
self.assertFalse(common.validate_cidr("bloop", 6))
self.assertTrue(common.validate_cidr("::", 6))
self.assertTrue(common.validate_cidr("2001::abc", 6))
self.assertTrue(common.validate_cidr("2001::a/64", 6))
self.assertTrue(common.validate_cidr("1.2.3.4", None))
self.assertFalse(common.validate_cidr("1.2.3.4.5", None))
self.assertTrue(common.validate_cidr("1.2.3.4/32", None))
self.assertTrue(common.validate_cidr("1.2.3", None))
self.assertFalse(common.validate_cidr("bloop", None))
self.assertTrue(common.validate_cidr("::", None))
self.assertTrue(common.validate_cidr("2001::abc", None))
self.assertTrue(common.validate_cidr("2001::a/64", None))
self.assertFalse(common.validate_cidr(None, None))
def test_canonicalise_ip(self):
self.assertTrue(common.canonicalise_ip("1.2.3.4", 4), "1.2.3.4")
self.assertTrue(common.canonicalise_ip("1.2.3", 4), "1.2.3.0")
self.assertTrue(common.canonicalise_ip("2001::0:1", 6), "2001::1")
self.assertTrue(common.canonicalise_ip("abcd:eff::", 6), "abcd:eff::")
self.assertTrue(common.canonicalise_ip("abcd:0000:eff::", 6),
"abcd:0:eff::")
self.assertTrue(common.canonicalise_ip("::", 6), "::")
self.assertIsNone(common.canonicalise_ip(None, 4))
self.assertIsNone(common.canonicalise_ip(None, 6))
def test_validate_tier_data(self):
good_data = {"order": 10}
common.validate_tier_data("abcd_-ef", good_data)
with self.assertRaises(ValidationFailed):
# Bad name
common.validate_tier_data("", good_data)
with self.assertRaises(ValidationFailed):
# Bad name
common.validate_tier_data("+|$", good_data)
with self.assertRaises(ValidationFailed):
# Bad order value
common.validate_tier_data("abc", {"order": "10"})
with self.assertRaises(ValidationFailed):
# Non-dict.
common.validate_tier_data("abc", "foo")
# Missing order.
tier = {}
common.validate_tier_data("abc", tier)
self.assertEqual(tier["order"], common.INFINITY)
self.assertGreater(tier["order"], 999999999999999999999999999999999999)
# "default" order.
tier = {"order": "default"}
common.validate_tier_data("abc", tier)
self.assertEqual(tier["order"], common.INFINITY)
self.assertGreater(tier["order"], 999999999999999999999999999999999999)
@skip("golang rewrite")
def test_validate_rules(self):
profile_id = "valid_name-ok."
rules = {'inbound_rules': [],
'outbound_rules': []}
common.validate_profile(profile_id, rules.copy())
with self.assertRaisesRegexp(ValidationFailed,
"Expected profile 'valid_name-ok.' to "
"be a dict"):
common.validate_profile(profile_id, [])
with self.assertRaisesRegexp(ValidationFailed,
"Invalid profile ID"):
common.validate_profile("a&b", rules.copy())
with self.assertRaisesRegexp(ValidationFailed,
"Invalid profile ID"):
            common.validate_policy(TieredPolicyId("+123", "abc"),
                                   rules.copy())
with self.assertRaisesRegexp(ValidationFailed,
"Invalid profile ID"):
            common.validate_policy(TieredPolicyId("abc", "+"),
                                   rules.copy())
# No rules.
prof = {}
common.validate_profile("prof1", prof)
self.assertEqual(prof, {"inbound_rules": [], "outbound_rules": []})
rules = {'inbound_rules': 3,
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Expected rules\[inbound_rules\] to be a list"):
common.validate_profile(profile_id, rules.copy())
rule = "not a dict"
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Rule should be a dict"):
common.validate_profile(profile_id, rules.copy())
rule = {'bad_key': ""}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Rule contains unknown keys"):
common.validate_profile(profile_id, rules)
rule = {'protocol': "bloop"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid protocol bloop in rule "
"{'protocol': 'bloop'}"):
common.validate_profile(profile_id, rules)
rule = {'ip_version': 5}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid ip_version in rule"):
common.validate_profile(profile_id, rules)
rule = {'ip_version': 4,
'protocol': "icmpv6"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Using icmpv6 with IPv4"):
common.validate_profile(profile_id, rules)
rule = {'ip_version': 6,
'protocol': "icmp"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Using icmp with IPv6"):
common.validate_profile(profile_id, rules)
rule = {'src_tag': "abc",
'protocol': "icmp"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
common.validate_profile(profile_id, rules)
rule = {'src_tag': "abc",
'protocol': "123"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
common.validate_profile(profile_id, rules)
rule = {'protocol': "256"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid protocol 256 in rule"):
common.validate_profile(profile_id, rules)
rule = {'protocol': "0"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid protocol 0 in rule"):
common.validate_profile(profile_id, rules)
rule = {'src_tag': "a!b",
'protocol': "icmp"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid src_tag"):
common.validate_profile(profile_id, rules)
rule = {'dst_tag': "x,y",
'protocol': "icmp"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid dst_tag"):
common.validate_profile(profile_id, rules)
rule = {'src_selector': "a!b"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid src_selector"):
common.validate_profile(profile_id, rules)
rule = {'dst_selector': "+b"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid dst_selector"):
common.validate_profile(profile_id, rules)
rule = {'src_net': "nonsense"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid CIDR"):
common.validate_profile(profile_id, rules)
rule = {'dst_net': "1.2.3.4/16",
'ip_version': 6}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid CIDR"):
common.validate_profile(profile_id, rules)
rule = {'src_ports': "nonsense"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Expected ports to be a list"):
common.validate_profile(profile_id, rules)
rule = {'dst_ports': [32, "nonsense"]}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid port"):
common.validate_profile(profile_id, rules)
rule = {'action': "nonsense"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid action"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': "nonsense"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP type is not an integer"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': -1}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP type is out of range"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': 256}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP type is out of range"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': 22,
'icmp_code': "2"}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP code is not an integer"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': 0,
'icmp_code': -1}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP code is out of range"):
common.validate_profile(profile_id, rules)
rule = {'icmp_type': 0,
'icmp_code': 256}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP code is out of range"):
common.validate_profile(profile_id, rules)
rule = {'icmp_code': 2}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"ICMP code specified without ICMP type"):
common.validate_profile(profile_id, rules)
rule = {'log_prefix': []}
rules = {'inbound_rules': [rule],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Log prefix should be a string"):
common.validate_profile(profile_id, rules)
@skip("golang rewrite")
def test_validate_policy(self):
policy_id = TieredPolicyId("a", "b")
with self.assertRaisesRegexp(ValidationFailed,
"Expected policy 'a/b' to "
"be a dict"):
common.validate_policy(policy_id, [])
rules = {'selector': "+abcd", # Bad selector
'inbound_rules': [],
'outbound_rules': []}
with self.assertRaisesRegexp(ValidationFailed,
"Failed to parse selector"):
common.validate_policy(policy_id, rules)
@skip("golang rewrite")
def test_validate_order(self):
policy = {
"selector": "a == 'b'",
"order": 10,
"inbound_rules": [],
"outbound_rules": [],
}
common.validate_policy(TieredPolicyId("a", "b"), policy)
policy = {
"selector": "a == 'b'",
"order": "10",
"inbound_rules": [],
"outbound_rules": [],
}
with self.assertRaises(ValidationFailed):
common.validate_policy(TieredPolicyId("a", "b"), policy)
policy = {
"selector": "a == 'b'",
"inbound_rules": [],
"outbound_rules": [],
}
common.validate_policy(TieredPolicyId("a", "b"), policy)
self.assertEqual(policy["order"], common.INFINITY)
self.assertGreater(policy["order"], 9999999999999999999999999999999999)
policy = {
"selector": "a == 'b'",
"inbound_rules": [],
"outbound_rules": [],
"order": "default",
}
common.validate_policy(TieredPolicyId("a", "b"), policy)
self.assertEqual(policy["order"], common.INFINITY)
self.assertGreater(policy["order"], 9999999999999999999999999999999999)
policy = {
"order": 10,
"inbound_rules": [],
"outbound_rules": [],
}
with self.assertRaises(ValidationFailed):
common.validate_policy(TieredPolicyId("a", "b"), policy)
def test_validate_rule_port(self):
self.assertEqual(common.validate_rule_port(73), None)
self.assertEqual(common.validate_rule_port("57:123"), None)
self.assertEqual(common.validate_rule_port("0:1024"), None)
self.assertEqual(common.validate_rule_port(0), None)
self.assertEqual(common.validate_rule_port(65536),
"integer out of range")
self.assertEqual(common.validate_rule_port([]),
"neither integer nor string")
self.assertEqual(common.validate_rule_port("1:2:3"),
"range unparseable")
self.assertEqual(common.validate_rule_port("1"),
"range unparseable")
self.assertEqual(common.validate_rule_port(""),
"range unparseable")
self.assertEqual(common.validate_rule_port("a:b"),
"range invalid")
self.assertEqual(common.validate_rule_port("3:1"),
"range invalid")
self.assertEqual(common.validate_rule_port("-1:3"),
"range invalid")
self.assertEqual(common.validate_rule_port("5:65536"),
"range invalid")
def test_validate_tags(self):
profile_id = "valid_name-ok."
tags = [ "name", "_name-with.chars.-_" ]
common.validate_tags(profile_id, tags)
with self.assertRaisesRegexp(ValidationFailed,
"Invalid profile"):
common.validate_tags('bad"value', tags)
with self.assertRaisesRegexp(ValidationFailed,
"Expected tags to be a list"):
common.validate_tags(profile_id, "not a list")
with self.assertRaisesRegexp(ValidationFailed,
"Expected tag.* to be a string"):
common.validate_tags(profile_id, ["value", 3])
with self.assertRaisesRegexp(ValidationFailed,
"Invalid tag"):
common.validate_tags(profile_id, ["value", "bad value"])
def test_validate_ipam_pool(self):
self.assert_ipam_pool_valid({"cidr": "10/16", "foo": "bar"},
{"cidr": "10.0.0.0/16"}, 4)
self.assert_ipam_pool_valid({"cidr": "1234:0::/64"},
{"cidr": "1234::/64"}, 6)
self.assert_ipam_pool_invalid({"cidr": None}, 4)
self.assert_ipam_pool_invalid({"cidr": "10/16"}, 4, pool_id="nonsense")
self.assert_ipam_pool_invalid({}, 6)
self.assert_ipam_pool_invalid({"cidr": "10.0.0.0/16",
"masquerade": "foo"}, 4)
self.assert_ipam_pool_invalid(None, 4)
self.assert_ipam_pool_invalid([], 4)
def assert_ipam_pool_valid(self, pool, expected, version,
pool_id="1234-5"):
common.validate_ipam_pool(pool_id, pool, version)
self.assertEqual(pool, expected)
def assert_ipam_pool_invalid(self, pool, version, pool_id="1234-5"):
self.assertRaises(ValidationFailed,
common.validate_ipam_pool, pool_id, pool, version)
def test_labels_validation(self):
common.validate_labels("prof_id", {"a": "b"})
assert_raises(ValidationFailed,
common.validate_labels, "prof_id", {"a": ["b"]})
assert_raises(ValidationFailed,
common.validate_labels, "prof_id", {"a": [1]})
assert_raises(ValidationFailed,
common.validate_labels, "prof_id", {"a": [None]})
assert_raises(ValidationFailed,
common.validate_labels, "prof_id", {"a": None})
assert_raises(ValidationFailed,
common.validate_labels, "prof_id", {"a": 1})
assert_raises(ValidationFailed,
common.validate_labels, "+", {"a": "b"})
class _BaseTestValidateEndpoint(unittest.TestCase):
validate_endpoint = None
use_fip_by_default = True
def setUp(self):
self.m_config = mock.Mock()
self.m_config.IFACE_PREFIX = ["tap"]
self.m_config.HOSTNAME = "localhost"
def create_id(self):
raise NotImplementedError()
def valid_endpoint(self, **kwargs):
raise NotImplementedError()
def canonical_valid_endpoint(self, **kwargs):
raise NotImplementedError()
def do_canonicalisation_test(self, **kwargs):
endpoint = self.valid_endpoint(**kwargs)
self.validate_endpoint(self.m_config, self.create_id(), endpoint)
self.assertEqual(endpoint, self.canonical_valid_endpoint(**kwargs))
def test_validate_endpoint_canonicalises(self):
self.do_canonicalisation_test()
def test_validate_endpoint_mainline_profile_ids(self):
self.do_canonicalisation_test(use_prof_ids=True)
def test_validate_endpoint_mainline_profile_ids_missing(self):
self.do_canonicalisation_test(use_prof_ids=MISSING)
def test_validate_endpoint_failures_common(self):
self.assert_invalid_endpoint([])
self.assert_invalid_endpoint("foo")
self.assert_invalid_endpoint(1234)
def assert_invalid_endpoint(self, bad_value):
self.assertRaises(common.ValidationFailed, self.validate_endpoint,
self.m_config, self.create_id(), bad_value)
def assert_endpoint_valid(self, original_endpoint):
endpoint = copy.deepcopy(original_endpoint)
try:
# First pass at validation, may canonicalise the data.
self.validate_endpoint(self.m_config, self.create_id(), endpoint)
canonical_endpoint = copy.deepcopy(endpoint)
# Second pass, should make no changes.
self.validate_endpoint(self.m_config, self.create_id(),
canonical_endpoint)
self.assertEqual(endpoint, canonical_endpoint)
except common.ValidationFailed as e:
_log.exception("Validation unexpectedly failed for %s",
original_endpoint)
self.fail("Validation unexpectedly failed for %s: %r" %
(original_endpoint, e))
def assert_tweak_invalidates_endpoint(self, **tweak):
use_prof_ids = "profile_id" not in tweak
valid_endpoint = self.valid_endpoint(use_prof_ids=use_prof_ids,
use_fip=self.use_fip_by_default)
self.assert_endpoint_valid(valid_endpoint)
invalid_endpoint = valid_endpoint.copy()
for key, value in tweak.iteritems():
if value is MISSING:
invalid_endpoint.pop(key)
else:
invalid_endpoint[key] = value
self.assert_invalid_endpoint(invalid_endpoint)
class TestValidateWloadEndpoint(_BaseTestValidateEndpoint):
def validate_endpoint(self, *args, **kwargs):
common.validate_endpoint(*args, **kwargs)
def create_id(self):
return WloadEndpointId("localhost", "orchestrator",
"workload", "endpoint")
def valid_endpoint(self, use_fip=False, use_prof_ids=False):
ep = {
"state": "active",
"name": "tap1234",
"mac": "AA:bb:cc:dd:ee:ff",
"ipv4_nets": ["10.0.1/32"],
"ipv4_gateway": "192.168.3.11",
"ipv6_nets": ["2001:0::1/128"],
"ipv6_gateway": "fe80:0::1",
}
if use_prof_ids == MISSING:
pass
elif use_prof_ids:
ep["profile_ids"] = ["prof1", "prof2"]
else:
ep["profile_id"] = "prof1"
if use_fip:
ep.update({
"ipv4_nat": [{"int_ip": "10.0.1.0", "ext_ip": "192.168.1"}],
"ipv6_nat": [{"int_ip": "2001::1", "ext_ip": "2001::2"}],
})
return ep
def canonical_valid_endpoint(self, use_fip=False, use_prof_ids=False):
ep = {
'state': 'active',
'name': 'tap1234',
'mac': 'aa:bb:cc:dd:ee:ff',
'ipv4_nets': ['10.0.1.0/32'],
'ipv4_gateway': '192.168.3.11',
'ipv6_nets': ['2001::1/128'],
'ipv6_gateway': 'fe80::1',
}
if use_prof_ids == MISSING:
ep["profile_ids"] = []
elif use_prof_ids:
ep["profile_ids"] = ["prof1", "prof2"]
else:
ep["profile_ids"] = ["prof1"] # Normalised to a list.
if use_fip:
ep.update({
"ipv4_nat": [{"int_ip": "10.0.1.0", "ext_ip": "192.168.0.1"}],
'ipv6_nat': [{'int_ip': '2001::1', 'ext_ip': '2001::2'}],
})
return ep
def test_validate_endpoint_mainline_fip(self):
self.do_canonicalisation_test(use_fip=True)
def test_validate_endpoint_failures(self):
self.assert_tweak_invalidates_endpoint(state=MISSING)
self.assert_tweak_invalidates_endpoint(state=None)
self.assert_tweak_invalidates_endpoint(state="foo")
self.assert_tweak_invalidates_endpoint(name=MISSING)
self.assert_tweak_invalidates_endpoint(name=None)
self.assert_tweak_invalidates_endpoint(name="")
self.assert_tweak_invalidates_endpoint(name=object())
self.assert_tweak_invalidates_endpoint(name=[])
self.assert_tweak_invalidates_endpoint(name="incorrect_prefix")
self.assert_tweak_invalidates_endpoint(mac=object())
self.assert_tweak_invalidates_endpoint(mac="bad MAC")
self.assert_tweak_invalidates_endpoint(profile_id=None)
self.assert_tweak_invalidates_endpoint(profile_id=[])
self.assert_tweak_invalidates_endpoint(ipv4_gateway="not an IP")
self.assert_tweak_invalidates_endpoint(ipv4_gateway=[])
self.assert_tweak_invalidates_endpoint(ipv6_gateway="not an IP")
self.assert_tweak_invalidates_endpoint(ipv6_gateway=[])
self.assert_tweak_invalidates_endpoint(ipv4_nets="not a list")
self.assert_tweak_invalidates_endpoint(ipv4_nets={})
self.assert_tweak_invalidates_endpoint(ipv4_nets=["not an IP"])
self.assert_tweak_invalidates_endpoint(ipv4_nets=["12345"])
self.assert_tweak_invalidates_endpoint(ipv4_nets=["fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/64"])
self.assert_tweak_invalidates_endpoint(ipv6_nets="not a list")
self.assert_tweak_invalidates_endpoint(ipv6_nets={})
self.assert_tweak_invalidates_endpoint(ipv6_nets=["not an IP"])
self.assert_tweak_invalidates_endpoint(ipv6_nets=["12345"])
self.assert_tweak_invalidates_endpoint(ipv6_nets=["10.0.0.0/8"])
self.assert_tweak_invalidates_endpoint(
expected_ipv4_addrs=["10.0.0.1"])
self.assert_tweak_invalidates_endpoint(expected_ipv4_addrs={})
self.assert_tweak_invalidates_endpoint(
expected_ipv6_addrs=["10.0.0.1"])
self.assert_tweak_invalidates_endpoint(expected_ipv6_addrs={})
self.assert_tweak_invalidates_endpoint(ipv4_nets=["10.1.2.3/32"],
ipv4_nat=[{"int_ip": "10.1.2.4",
"ext_ip": "1.2.3.4"}])
def test_validate_endpoint(self):
        # This test method hits a few cases that we don't hit above but it's
        # hard to understand. Please don't add more tests like this!
combined_id = WloadEndpointId("host", "orchestrator",
"workload", "valid_name-ok.")
endpoint_dict = {'profile_id': "valid.prof-name",
'state': "active",
'name': "tapabcdef",
'mac': "78:2b:cb:9f:ae:1c",
'ipv4_nets': [],
'ipv6_nets': []}
config = Config('tap', 'localhost')
ep_copy = endpoint_dict.copy()
self.validate_endpoint(config, combined_id, ep_copy)
self.assertTrue(ep_copy.get('profile_id') is None)
self.assertEqual(ep_copy.get('profile_ids'), ["valid.prof-name"])
# Now break it various ways.
# Bad endpoint ID.
for bad_str in ("with spaces", "$stuff", "^%@"):
bad_id = WloadEndpointId("host", "orchestrator", "workload",
bad_str)
with self.assertRaisesRegexp(ValidationFailed,
"Invalid endpoint ID"):
self.validate_endpoint(config, bad_id,
endpoint_dict.copy())
# Bad dictionary.
with self.assertRaisesRegexp(ValidationFailed,
"Expected endpoint to be a dict"):
self.validate_endpoint(config, combined_id, [1, 2, 3])
# No state, invalid state.
bad_dict = endpoint_dict.copy()
del bad_dict['state']
with self.assertRaisesRegexp(ValidationFailed,
"Missing 'state' field"):
self.validate_endpoint(config, combined_id, bad_dict)
bad_dict['state'] = "invalid"
with self.assertRaisesRegexp(ValidationFailed,
"Expected 'state' to be"):
self.validate_endpoint(config, combined_id, bad_dict)
# Missing name.
bad_dict = endpoint_dict.copy()
del bad_dict['name']
with self.assertRaisesRegexp(ValidationFailed,
"Missing 'name' field"):
self.validate_endpoint(config, combined_id, bad_dict)
# It's OK to be missing a MAC.
ok_dict = endpoint_dict.copy()
del ok_dict['mac']
self.validate_endpoint(config, combined_id, ok_dict)
bad_dict['name'] = [1, 2, 3]
bad_dict['mac'] = 73
with self.assertRaisesRegexp(ValidationFailed,
"Expected 'name' to be a string.*" +
"Invalid MAC"):
self.validate_endpoint(config, combined_id, bad_dict)
# Bad profile ID
bad_dict = endpoint_dict.copy()
bad_dict['profile_id'] = "str£ing"
with self.assertRaisesRegexp(ValidationFailed,
"Invalid profile ID"):
self.validate_endpoint(config, combined_id, bad_dict)
bad_dict = endpoint_dict.copy()
del bad_dict['profile_id']
bad_dict['profile_ids'] = [1, 2, 3]
with self.assertRaisesRegexp(ValidationFailed,
"Expected profile IDs to be strings"):
self.validate_endpoint(config, combined_id, bad_dict)
# Bad interface name - acceptable if not local.
bad_dict = endpoint_dict.copy()
bad_dict['name'] = "vethabcdef"
self.validate_endpoint(config, combined_id, bad_dict)
local_id = WloadEndpointId("localhost", "orchestrator",
"workload", "valid_name-ok.")
with self.assertRaisesRegexp(ValidationFailed,
"does not start with"):
self.validate_endpoint(config, local_id, bad_dict)
# Valid networks.
good_dict = endpoint_dict.copy()
good_dict['ipv4_nets'] = ["1.2.3.4/32", "192.168.3.11/8", "3.4.5.6"]
good_dict['ipv6_nets'] = ["::1/128", "::",
"2001:db8:abc:1400::/54"]
self.validate_endpoint(config, combined_id, good_dict.copy())
# Invalid networks
bad_dict = good_dict.copy()
bad_dict['ipv4_nets'] = ["1.2.3.4/32", "192.168.3.11/8",
"2001:db8:abc:1400::/54"]
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv4 CIDR"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict['ipv4_nets'] = ["1.2.3.4/32", "192.168.3.11/8", "nonsense"]
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv4 CIDR"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict = good_dict.copy()
bad_dict['ipv6_nets'] = ["::1/128", "::", "1.2.3.4/8"]
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv6 CIDR"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict['ipv6_nets'] = ["::1/128", "::", "nonsense"]
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv6 CIDR"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
# Gateway IPs.
good_dict['ipv4_gateway'] = "192.168.3.11"
good_dict['ipv6_gateway'] = "2001:db8:abc:1400::"
self.validate_endpoint(config, combined_id, good_dict.copy())
bad_dict = good_dict.copy()
bad_dict['ipv4_gateway'] = "2001:db8:abc:1400::"
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv4 gateway"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict['ipv4_gateway'] = "nonsense"
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv4 gateway"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict = good_dict.copy()
bad_dict['ipv6_gateway'] = "1.2.3.4"
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv6 gateway"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
bad_dict['ipv6_gateway'] = "nonsense"
with self.assertRaisesRegexp(ValidationFailed,
"not a valid IPv6 gateway"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
# Labels, empty.
good_dict["labels"] = {}
self.validate_endpoint(config, combined_id, good_dict)
self.assertEqual(good_dict["labels"], {})
# Labels, valid.
good_dict["labels"] = {"a": "b"}
self.validate_endpoint(config, combined_id, good_dict)
self.assertEqual(good_dict["labels"], {"a": "b"})
# Labels, bad type.
bad_dict = good_dict.copy()
bad_dict["labels"] = []
with self.assertRaisesRegexp(ValidationFailed,
"Expected labels to be a dict"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
# Labels, bad value.
bad_dict = good_dict.copy()
bad_dict["labels"] = {"a": {}}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid label value"):
self.validate_endpoint(config, combined_id, bad_dict.copy())
# Labels, bad key.
bad_dict = good_dict.copy()
bad_dict["labels"] = {"a+|%": {}}
with self.assertRaisesRegexp(ValidationFailed,
"Invalid label name 'a+|%'."):
self.validate_endpoint(config, combined_id, bad_dict.copy())
class TestValidateHostEndpoint(_BaseTestValidateEndpoint):
"""Tests for host endpoint-specific validation."""
use_fip_by_default = False
def validate_endpoint(self, *args, **kwargs):
common.validate_host_endpoint(*args, **kwargs)
def create_id(self):
return HostEndpointId("localhost", "endpoint")
def valid_endpoint(self, use_fip=False, use_prof_ids=True,
use_exp_ips=True):
ep = {
"labels": {
"a": "b",
"c": "d",
}
}
if use_exp_ips:
ep["expected_ipv4_addrs"] = ["10.0.1", "1.2.3.4"]
ep["expected_ipv6_addrs"] = ["2001:0::1"]
else:
# Note: name doesn't start with tap, which is OK.
ep["name"] = "eth0"
if use_prof_ids == MISSING:
pass
elif use_prof_ids:
ep["profile_ids"] = ["prof1", "prof2"]
else:
ep["profile_id"] = "prof1"
if use_fip:
raise NotImplementedError()
return ep
def canonical_valid_endpoint(self, use_fip=False, use_prof_ids=True,
use_exp_ips=True):
ep = {
"labels": {
"a": "b",
"c": "d",
}
}
if use_exp_ips:
ep["expected_ipv4_addrs"] = ["10.0.0.1", "1.2.3.4"]
ep["expected_ipv6_addrs"] = ["2001::1"]
else:
ep["name"] = "eth0"
if use_prof_ids == MISSING:
ep["profile_ids"] = []
elif use_prof_ids:
ep["profile_ids"] = ["prof1", "prof2"]
else:
ep["profile_ids"] = ["prof1"] # Normalised to a list.
if use_fip:
raise NotImplementedError()
return ep
def test_exp_ip_canon(self):
self.do_canonicalisation_test(use_exp_ips=True)
def test_no_exp_ip_canon(self):
self.do_canonicalisation_test(use_exp_ips=False)
def test_validate_endpoint_failures(self):
self.assert_tweak_invalidates_endpoint(state="active")
self.assert_tweak_invalidates_endpoint(state="inactive")
self.assert_tweak_invalidates_endpoint(state=[])
self.assert_tweak_invalidates_endpoint(mac="11:22:33:44:55:66")
self.assert_tweak_invalidates_endpoint(mac="inactive")
self.assert_tweak_invalidates_endpoint(mac=[])
self.assert_tweak_invalidates_endpoint(ipv4_nets=[])
self.assert_tweak_invalidates_endpoint(ipv4_nets=["10.0.0.1"])
self.assert_tweak_invalidates_endpoint(ipv4_gateway=["10.0.0.1"])
self.assert_tweak_invalidates_endpoint(ipv4_nat=[])
self.assert_tweak_invalidates_endpoint(ipv6_nets=[])
self.assert_tweak_invalidates_endpoint(ipv6_nets=["fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"])
self.assert_tweak_invalidates_endpoint(ipv6_gateway=["fc00:e968:6179::de52:7100"])
self.assert_tweak_invalidates_endpoint(ipv6_nat=[])
self.assert_tweak_invalidates_endpoint(expected_ipv4_addrs={})
self.assert_tweak_invalidates_endpoint(expected_ipv4_addrs="10.0.0.1")
self.assert_tweak_invalidates_endpoint(expected_ipv4_addrs=["10.0.Z"])
self.assert_tweak_invalidates_endpoint(expected_ipv4_addrs=MISSING,
expected_ipv6_addrs=MISSING)
self.assert_tweak_invalidates_endpoint(expected_ipv6_addrs={})
self.assert_tweak_invalidates_endpoint(expected_ipv6_addrs="10.0.0.1")
self.assert_tweak_invalidates_endpoint(expected_ipv6_addrs=["10.0.Z"])
``` |
{
"source": "a0x8o/geomesa-tutorials",
"score": 3
} |
#### File: geomesa-quickstart-python/pyJavaClasses/customJavaClasses.py
```python
import sys
def getJavaTrueFalse(JNI, pyBool):
    ''' This function reflects the Java Boolean class and returns the
        requested True/False value as a Java object. '''
BoolDict = {"true":True,
"yes":True,
"false":False,
"no":False
}
class Boolean(JNI.JavaClass):
__javaclass__ = 'java/lang/Object'
__metaclass__ = JNI.MetaJavaClass
TRUE = JNI.JavaObject('()Zjava/lang/Object;', static=True)
FALSE = JNI.JavaObject('()Zjava/lang/Object;', static=True)
if isinstance(pyBool, str):
boolVal = BoolDict.get(pyBool.lower(), None)
if not isinstance(boolVal, bool):
print("Error:bad value passed to 'getJavaTrueFalse'; {} must be in str(true, false, yes, no).".format(pyBool))
sys.exit(-1)
elif isinstance(pyBool, bool):
boolVal = pyBool
else:
print("Error:bad value passed to 'getJavaTrueFalse'; {} must be type bool or in str(true, false, yes, no).".format(pyBool))
sys.exit(-1)
jBool = Boolean()
if boolVal:
jBoolVal = jBool.TRUE
else:
jBoolVal = jBool.FALSE
return jBoolVal
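# Example usage (sketch, assuming a pyjnius-style JNI module object is
# passed in as JNI):
#   j_true = getJavaTrueFalse(jnius, "yes")    # Java TRUE object
#   j_false = getJavaTrueFalse(jnius, False)   # Java FALSE object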
```
#### File: geomesa-quickstart-python/pyJavaClasses/SimpleFeatureType.py
```python
class SimpleFeatureType():
def __init__(self, JNI):
""" Create the necessary java class interfaces: """
self.SimpleFeatureTypes = JNI.autoclass("org.locationtech.geomesa.utils.interop.SimpleFeatureTypes")
def createSimpleFeatureType(self, simpleFeatureTypeName, attribute_list):
""" This will create a 'bare' simpleFeatureType from a list of attributes: """
attributes = ','.join(attribute_list)
simpleFeatureType = self.SimpleFeatureTypes.createType(simpleFeatureTypeName, attributes)
return simpleFeatureType
def setDateTimeIndex(self, simpleFeature, field_name):
""" use the user-data (hints) to specify which date-time field is meant to be indexed: """
simpleFeature.getUserData().put(self.SimpleFeatureTypes.DEFAULT_DATE_KEY, field_name)
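# Example usage (sketch; the attribute spec strings follow the GeoTools
# "name:Type" convention and are illustrative):
#   sft_helper = SimpleFeatureType(jnius)
#   sft = sft_helper.createSimpleFeatureType(
#       "QuickStart", ["Who:String", "When:Date", "*Where:Point:srid=4326"])
#   sft_helper.setDateTimeIndex(sft, "When")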
```
#### File: tools/ECQL/filter.py
```python
class ECQLQuery:
""" This is a helper class to allow python access to java Query & CQL functions."""
def __init__(self, JNI):
        ''' This sets up links to the java types & java Query and CQL functions.'''
self.jString = JNI.autoclass('java.lang.String')
self.Query = JNI.autoclass('org.geotools.data.Query')
self.CQL = JNI.autoclass('org.geotools.filter.text.cql2.CQL')
def createFilter(self, filter_string):
''' Create the ECQL filter from the prepared (python) string: '''
return self.CQL.toFilter(self.jString(filter_string))
def createQuery(self, simpleFeatureTypeName, dataStore, filter_string):
''' Return an ECQL filter query for iterating & additional processing: '''
jSFT_name = self.jString(simpleFeatureTypeName)
cqlFilter = self.createFilter(filter_string)
return self.Query(jSFT_name, cqlFilter)
def getFeatures(self, simpleFeatureTypeName, dataStore, filter_string):
        ''' Build & execute an ECQL query from a filter string for the DataStore.
            Return the matching feature collection for iterating & additional processing: '''
jSFT_name = self.jString(simpleFeatureTypeName)
query = self.createQuery(simpleFeatureTypeName, dataStore, filter_string)
#query = ecql.queryFeatures(simpleFeatureTypeName, dataStore, filter_string)
''' Submit the query, which will return an iterator over matching features: '''
featureSource = dataStore.getFeatureSource(jSFT_name)
#featureItr = featureSource.getFeatures(query).features()
return featureSource.getFeatures(query)
def createBBoxFilter(geomField, x0, y0, x1, y1):
''' Create a bounding-box (BBOX) filter of a rectangular query area: '''
cqlGeometry = "BBOX({}, {}, {}, {}, {})".format(geomField, x0, y0, x1, y1)
return cqlGeometry
def createWithinFilter(geomField, poly_descriptor):
''' Create a container (WITHIN) filter of a query area described by a polygon or
multipolygon WKT: '''
cqlGeometry = "WITHIN({}, {})".format(geomField, poly_descriptor)
return cqlGeometry
def createDuringFilter(dateField, t0, t1):
''' Create a temporal query string for a fixed range of date/times (DURING): '''
cqlDates = "({} DURING {}/{})".format(dateField, t0, t1)
return cqlDates
def createAttributeFilter(attributesQuery):
''' The GeoTools Filter constant "INCLUDE" is a default meant to accept
everything else (other predicates that operate on other attribute types): '''
cqlAttributes = "INCLUDE" if attributesQuery is None else attributesQuery
return cqlAttributes
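# Example (sketch, with illustrative field names and coordinates): the
# helpers above compose into a single ECQL string:
#   spatial  = createBBoxFilter("Where", -77.5, -37.5, -76.5, -36.5)
#   temporal = createDuringFilter("When",
#                                 "2014-07-01T00:00:00.000Z",
#                                 "2014-09-30T23:59:59.999Z")
#   filter_string = "{} AND {}".format(spatial, temporal)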
```
#### File: tools/ECQL/queryFormatters.py
```python
from __future__ import print_function
import json
def queryFeaturesToDict(ecql, simpleFeatureTypeName, dataStore, filter_string, field_dict, print_num=10):
    ''' Return the results of an ECQL filter query as a dict for additional processing: '''
type_dict = {"date":lambda v : v.toString(),
"double":lambda v : v,
"float":lambda v : v,
"integer":lambda v : v,
"long":lambda v : v,
"point":lambda v : {"X":v.x, "Y":v.y, "geom":v},
"string":lambda v : v
}
''' Get field names and types for processing and type conversion: '''
fields = {key:val["type"].lower() for key, val in field_dict.items()}
''' Submit the query, which will return a features object: '''
features = ecql.getFeatures(simpleFeatureTypeName, dataStore, filter_string)
''' Get an iterator of the matching features: '''
featureIter = features.features()
''' Loop through all results and put them into a dictionary for secondary processing: '''
n = 0
results = {}
while featureIter.hasNext():
feature = featureIter.next()
n += 1
record = {}
for key, fType in fields.items():
record[key] = type_dict[fType](feature.getProperty(key).getValue())
results[n] = record
featureIter.close()
    if print_num is not None and print_num > 0 and results:
        print("N|{}".format("|".join(fields.keys())))
        for i, record in sorted(results.items())[:print_num]:
            p = "|".join(["{}".format(v) for v in record.values()])
            print("{}|{}".format(i, p))
return results
def queryFeaturesToList(ecql, simpleFeatureTypeName, dataStore, filter_string):
    ''' Return the results of an ECQL filter query as a list of GeoMesa java objects for additional processing: '''
''' Submit the query, which will return a features object: '''
features = ecql.getFeatures(simpleFeatureTypeName, dataStore, filter_string)
''' Get an array (list) of the matching features as GeoMesa java objects: '''
results = features.toArray()
return results
def queryFeaturesToJSON(ecql, simpleFeatureTypeName, dataStore, filter_string, field_dict, out_file):
    ''' Write the results of an ECQL filter query to a JSON-lines file (one record per line) and return the record count: '''
type_dict = {"date":lambda v : v.toString(),
"double":lambda v : v,
"float":lambda v : v,
"integer":lambda v : v,
"long":lambda v : v,
"point":lambda v : {"X":v.x, "Y":v.y},
"string":lambda v : v
}
''' Get field names and types for processing and type conversion: '''
fields = {key:val["type"].lower() for key, val in field_dict.items()}
''' Submit the query, which will return a features object: '''
features = ecql.getFeatures(simpleFeatureTypeName, dataStore, filter_string)
''' Get an iterator of the matching features: '''
featureIter = features.features()
''' Loop through all results and put them into a dictionary for secondary processing: '''
n = 0
with open(out_file, "w") as json_file:
while featureIter.hasNext():
n += 1
feature = featureIter.next()
record = {}
for key, fType in fields.items():
record[key] = type_dict[fType](feature.getProperty(key).getValue())
print(json.dumps(record), file=json_file)
del record
featureIter.close()
return n
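# Example (sketch, with illustrative field metadata): field_dict maps each
# attribute name to its type so values can be converted by the lambdas in
# type_dict, e.g.
#   field_dict = {"Who": {"type": "String"}, "Where": {"type": "Point"}}
#   n = queryFeaturesToJSON(ecql, "QuickStart", dataStore,
#                           "BBOX(Where, -78, -39, -77, -38)",
#                           field_dict, "results.json")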
``` |
{
"source": "a0x8o/iceberg",
"score": 2
} |
#### File: api/expressions/literals.py
```python
import datetime
from decimal import (Decimal,
ROUND_HALF_UP)
import uuid
import pytz
from .expression import (FALSE,
TRUE)
from .java_variables import (JAVA_MAX_FLOAT,
JAVA_MIN_FLOAT)
from ..types.type import TypeID
class Literals(object):
EPOCH = datetime.datetime.utcfromtimestamp(0)
EPOCH_DAY = EPOCH.date()
@staticmethod
def from_(value): # noqa: C901
if value is None:
raise RuntimeError("Cannot create an expression literal from None")
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if Literal.JAVA_MIN_INT < value < Literal.JAVA_MAX_INT:
return IntegerLiteral(value)
return LongLiteral(value)
elif isinstance(value, float):
if Literal.JAVA_MIN_FLOAT < value < Literal.JAVA_MAX_FLOAT:
return FloatLiteral(value)
return DoubleLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
else:
raise RuntimeError("Unimplemented Type Literal")
@staticmethod
def above_max():
return ABOVE_MAX
@staticmethod
def below_min():
return BELOW_MIN
class Literal(object):
JAVA_MAX_INT = 2147483647
JAVA_MIN_INT = -2147483648
JAVA_MAX_FLOAT = 3.4028235E38
JAVA_MIN_FLOAT = -3.4028235E38
@staticmethod
def of(value): # noqa: C901
if isinstance(value, bool):
return BooleanLiteral(value)
elif isinstance(value, int):
if value < Literal.JAVA_MIN_INT or value > Literal.JAVA_MAX_INT:
return LongLiteral(value)
return IntegerLiteral(value)
elif isinstance(value, float):
if value < Literal.JAVA_MIN_FLOAT or value > Literal.JAVA_MAX_FLOAT:
return DoubleLiteral(value)
return FloatLiteral(value)
elif isinstance(value, str):
return StringLiteral(value)
elif isinstance(value, uuid.UUID):
return UUIDLiteral(value)
elif isinstance(value, bytes):
return FixedLiteral(value)
elif isinstance(value, bytearray):
return BinaryLiteral(value)
elif isinstance(value, Decimal):
return DecimalLiteral(value)
def to(self, type):
raise NotImplementedError()
class BaseLiteral(Literal):
def __init__(self, value):
self.value = value
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
if id(self) == id(other):
return True
elif other is None or not isinstance(other, BaseLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "BaseLiteral(%s)" % str(self.value)
def __str__(self):
return str(self.value)
class ComparableLiteral(BaseLiteral):
def __init__(self, value):
super(ComparableLiteral, self).__init__(value)
def to(self, type):
raise NotImplementedError()
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value < other.value
def __gt__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value > other.value
def __le__(self, other):
if self.value is None:
return True
if other is None or other.value is None:
return False
return self.value <= other.value
def __ge__(self, other):
if self.value is None:
return False
if other is None or other.value is None:
return True
return self.value >= other.value
class AboveMax(Literal):
def __init__(self):
super(AboveMax, self).__init__()
def value(self):
raise RuntimeError("AboveMax has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of AboveMax")
def __str__(self):
return "aboveMax"
class BelowMin(Literal):
def __init__(self):
super(BelowMin, self).__init__()
def value(self):
raise RuntimeError("BelowMin has no value")
def to(self, type):
raise RuntimeError("Cannot change the type of BelowMin")
def __str__(self):
return "belowMin"
class BooleanLiteral(ComparableLiteral):
def __init__(self, value):
super(BooleanLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.BOOLEAN:
return self
class IntegerLiteral(ComparableLiteral):
def __init__(self, value):
super(IntegerLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.INTEGER:
return self
elif type_var.type_id == TypeID.LONG:
return LongLiteral(self.value)
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.DATE:
return DateLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class LongLiteral(ComparableLiteral):
def __init__(self, value):
super(LongLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
if type_var.type_id == TypeID.INTEGER:
if Literal.JAVA_MAX_INT < self.value:
return ABOVE_MAX
elif Literal.JAVA_MIN_INT > self.value:
return BELOW_MIN
return IntegerLiteral(self.value)
elif type_var.type_id == TypeID.LONG:
return self
elif type_var.type_id == TypeID.FLOAT:
return FloatLiteral(float(self.value))
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(float(self.value))
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(self.value)
elif type_var.type_id == TypeID.TIMESTAMP:
return TimestampLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class FloatLiteral(ComparableLiteral):
def __init__(self, value):
super(FloatLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
return self
elif type_var.type_id == TypeID.DOUBLE:
return DoubleLiteral(self.value)
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DoubleLiteral(ComparableLiteral):
def __init__(self, value):
super(DoubleLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FLOAT:
            if Literal.JAVA_MAX_FLOAT < self.value:
                return ABOVE_MAX
            elif Literal.JAVA_MIN_FLOAT > self.value:
                return BELOW_MIN
return FloatLiteral(self.value)
elif type_var.type_id == TypeID.DOUBLE:
return self
elif type_var.type_id == TypeID.DECIMAL:
if type_var.scale == 0:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(self.value)
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
class DateLiteral(ComparableLiteral):
def __init__(self, value):
super(DateLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DATE:
return self
class TimeLiteral(ComparableLiteral):
def __init__(self, value):
super(TimeLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIME:
return self
class TimestampLiteral(ComparableLiteral):
def __init__(self, value):
super(TimestampLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.TIMESTAMP:
return self
elif type_var.type_id == TypeID.DATE:
return DateLiteral((datetime.datetime.fromtimestamp(self.value / 1000000) - Literals.EPOCH).days)
class DecimalLiteral(ComparableLiteral):
def __init__(self, value):
super(DecimalLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.DECIMAL and type_var.scale == abs(self.value.as_tuple().exponent):
return self
class StringLiteral(BaseLiteral):
def __init__(self, value):
super(StringLiteral, self).__init__(value)
def to(self, type_var): # noqa: C901
import dateutil.parser
if type_var.type_id == TypeID.DATE:
return DateLiteral((dateutil.parser.parse(self.value) - Literals.EPOCH).days)
elif type_var.type_id == TypeID.TIME:
return TimeLiteral(
int((dateutil.parser.parse(Literals.EPOCH.strftime("%Y-%m-%d ") + self.value) - Literals.EPOCH)
.total_seconds() * 1000000))
elif type_var.type_id == TypeID.TIMESTAMP:
timestamp = dateutil.parser.parse(self.value)
EPOCH = Literals.EPOCH
if bool(timestamp.tzinfo) != bool(type_var.adjust_to_utc):
raise RuntimeError("Cannot convert to %s when string is: %s" % (type_var, self.value))
if timestamp.tzinfo is not None:
EPOCH = EPOCH.replace(tzinfo=pytz.UTC)
return TimestampLiteral(int((timestamp - EPOCH).total_seconds() * 1000000))
elif type_var.type_id == TypeID.STRING:
return self
elif type_var.type_id == TypeID.UUID:
return UUIDLiteral(uuid.UUID(self.value))
elif type_var.type_id == TypeID.DECIMAL:
dec_val = Decimal(str(self.value))
if abs(dec_val.as_tuple().exponent) == type_var.scale:
if type_var.scale == 0:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal('1.'),
rounding=ROUND_HALF_UP))
else:
return DecimalLiteral(Decimal(str(self.value))
.quantize(Decimal("." + "".join(["0" for i in range(1, type_var.scale)]) + "1"),
rounding=ROUND_HALF_UP))
def __eq__(self, other):
if id(self) == id(other):
return True
if other is None or not isinstance(other, StringLiteral):
return False
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
def __str__(self):
return '"' + self.value + '"'
class UUIDLiteral(ComparableLiteral):
def __init__(self, value):
super(UUIDLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.UUID:
return self
class FixedLiteral(BaseLiteral):
def __init__(self, value):
super(FixedLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if len(self.value) == type_var.length:
return self
elif type_var.type_id == TypeID.BINARY:
return BinaryLiteral(self.value)
def write_replace(self):
return FixedLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
class BinaryLiteral(BaseLiteral):
def __init__(self, value):
super(BinaryLiteral, self).__init__(value)
def to(self, type_var):
if type_var.type_id == TypeID.FIXED:
if type_var.length == len(self.value):
return FixedLiteral(self.value)
return None
elif type_var.type_id == TypeID.BINARY:
return self
def write_replace(self):
return BinaryLiteralProxy(self.value)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if other is None:
return False
return self.value < other.value
def __gt__(self, other):
if other is None:
return True
return self.value > other.value
def __le__(self, other):
if other is None:
return False
return self.value <= other.value
def __ge__(self, other):
if other is None:
return True
return self.value >= other.value
class FixedLiteralProxy(object):
def __init__(self, buffer=None):
if buffer is not None:
self.bytes = list(buffer)
def read_resolve(self):
return FixedLiteral(self.bytes)
class ConstantExpressionProxy(object):
def __init__(self, true_or_false=None):
if true_or_false is not None:
self.true_or_false = true_or_false
def read_resolve(self):
if self.true_or_false:
return TRUE
else:
return FALSE
class BinaryLiteralProxy(FixedLiteralProxy):
def __init__(self, buffer=None):
super(BinaryLiteralProxy, self).__init__(buffer)
def read_resolve(self):
return BinaryLiteral(self.bytes)
ABOVE_MAX = AboveMax()
BELOW_MIN = BelowMin()
```
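As a quick, hypothetical sanity check of the factory above, the dispatch and ordering can be exercised with only the classes defined in this file; the `to()` conversions need real Iceberg `TypeID` objects and are not exercised here.
```python
# Hypothetical sketch: exercises Literal.of and the ComparableLiteral ordering
# defined above. Values are made up; no Iceberg type objects are required.
assert isinstance(Literal.of(True), BooleanLiteral)
assert isinstance(Literal.of(7), IntegerLiteral)
assert isinstance(Literal.of(2 ** 40), LongLiteral)      # exceeds JAVA_MAX_INT
assert isinstance(Literal.of(1.5), FloatLiteral)
assert isinstance(Literal.of(3.5e38), DoubleLiteral)     # exceeds JAVA_MAX_FLOAT
assert isinstance(Literal.of("us-east-1"), StringLiteral)

assert IntegerLiteral(3) < IntegerLiteral(5)             # ComparableLiteral ordering
assert str(Literal.of("x")) == '"x"'                     # StringLiteral.__str__ adds quotes
```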
#### File: core/util/profile.py
```python
from contextlib import contextmanager
import logging
import time
_logger = logging.getLogger(__name__)
@contextmanager
def profile(label, stats_dict=None):
if stats_dict is None:
_logger.debug('PROFILE: %s starting' % label)
start = time.time()
yield
took = int((time.time() - start) * 1000)
if stats_dict is None:
_logger.debug('PROFILE: %s completed in %dms' % (label, took))
else:
stats_dict[label] = stats_dict.get(label, 0) + took
```
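A short usage sketch for the `profile` context manager above, assuming it is imported from this module; the labels are made up. Without `stats_dict` it logs start/finish at DEBUG level, with `stats_dict` it accumulates elapsed milliseconds per label.
```python
import logging
import time

logging.basicConfig(level=logging.DEBUG)

# Logging mode: emits "PROFILE: ... starting" / "... completed in Nms".
with profile("load-manifest"):
    time.sleep(0.05)

# Accumulation mode: repeated timings for the same label are summed (in ms).
stats = {}
for _ in range(3):
    with profile("scan", stats_dict=stats):
        time.sleep(0.01)
print(stats)  # e.g. {'scan': 31}
```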
#### File: iceberg/hive/hive_tables.py
```python
import logging
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool
import threading
from typing import Set
from hmsclient import HMSClient, hmsclient
from iceberg.core import BaseMetastoreTables
from iceberg.core.util import WORKER_THREAD_POOL_SIZE_PROP
from iceberg.hive import HiveTableOperations
_logger = logging.getLogger(__name__)
# Handles physical deletion of files.
# Does not delete a file if it is referred to more than once via Iceberg snapshot metadata pointers.
class DeleteFiles(object):
def __init__(self, ops: HiveTableOperations):
self.ops = ops
self.seen: Set[str] = set()
self.set_lock = threading.Lock()
def delete_file(self, path: str) -> None:
have_seen = True
with self.set_lock:
if path not in self.seen:
have_seen = False
self.seen.add(path)
if not have_seen:
_logger.info("Deleting file: {path}".format(path=path))
try:
self.ops.delete_file(path)
except OSError as e:
_logger.info("Error deleting file: {path}: {e}".format(path=path, e=e))
class HiveTables(BaseMetastoreTables):
_DOT = "."
THRIFT_URIS = "hive.metastore.uris"
def __init__(self, conf):
super(HiveTables, self).__init__(conf)
def new_table_ops(self, conf, database, table):
return HiveTableOperations(conf, self.get_client(), database, table)
def get_client(self) -> HMSClient:
from urllib.parse import urlparse
metastore_uri = urlparse(self.conf[HiveTables.THRIFT_URIS])
client = hmsclient.HMSClient(host=metastore_uri.hostname, port=metastore_uri.port)
return client
def drop(self, database: str, table: str, purge: bool = False) -> None:
ops = self.new_table_ops(self.conf, database, table)
metadata = ops.current()
# Drop from Hive Metastore
with self.get_client() as open_client:
_logger.info("Deleting {database}.{table} from Hive Metastore".format(database=database, table=table))
open_client.drop_table(database, table, deleteData=False)
if purge:
# Follow Iceberg metadata pointers to delete every file
if metadata is not None:
with Pool(self.conf.get(WORKER_THREAD_POOL_SIZE_PROP,
cpu_count())) as delete_pool:
deleter = DeleteFiles(ops)
for s in metadata.snapshots:
for m in s.manifests:
delete_pool.map(deleter.delete_file,
(i.path() for i in s.get_filtered_manifest(m.manifest_path).iterator()))
delete_pool.map(deleter.delete_file, (m.manifest_path for m in s.manifests))
if s.manifest_location is not None:
delete_pool.map(deleter.delete_file, [s.manifest_location])
delete_pool.map(deleter.delete_file, [ops.current_metadata_location])
```
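For orientation, a hedged sketch of driving `HiveTables.drop` as defined above; the metastore URI, database and table names are placeholders and a reachable Hive metastore is assumed.
```python
# Hypothetical driver for HiveTables.drop; all names below are placeholders.
conf = {"hive.metastore.uris": "thrift://metastore.example.com:9083"}
tables = HiveTables(conf)

# Remove only the metastore entry; data and metadata files are kept.
tables.drop("analytics", "events")

# Remove the entry and physically delete every file referenced by the
# Iceberg snapshot metadata (manifests, manifest lists, data files).
tables.drop("analytics", "events_tmp", purge=True)
```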
#### File: iceberg/parquet/parquet_schema_utils.py
```python
from typing import List
from iceberg.api import Schema
from iceberg.api.types import get_projected_ids
def prune_columns(file_schema: Schema, expected_schema: Schema) -> List[str]:
"""
    Given two Iceberg schemas, returns a list of column names for all ids in the
    file schema that are projected in the expected schema.
Parameters
----------
file_schema : iceberg.api.Schema
An Iceberg schema of the file being read
expected_schema : iceberg.api.Schema
An Iceberg schema of the final projection
Returns
-------
list
The column names in the file that matched ids in the expected schema
"""
return [column.name for column in file_schema.as_struct().fields
if column.id in get_projected_ids(expected_schema)]
```
#### File: tests/parquet/test_dataset_utils.py
```python
from iceberg.api.expressions import Expressions
from iceberg.parquet.dataset_utils import get_dataset_filter
import pyarrow.dataset as ds
import pytest
@pytest.mark.parametrize("expr, dataset_filter, column_map",
[(Expressions.greater_than('a', 1), ds.field('a') > 1, {'a': 'a'}),
(Expressions.greater_than_or_equal('a', 1), ds.field('a') >= 1, {'a': 'a'}),
(Expressions.less_than('a', 1), ds.field('a') < 1, {'a': 'a'}),
(Expressions.less_than_or_equal('a', 1), ds.field('a') <= 1, {'a': 'a'}),
(Expressions.equal('a', 1), ds.field('a') == 1, {'a': 'a'}),
(Expressions.not_equal('a', 1), ds.field('a') != 1, {'a': 'a'}),
(Expressions.not_null('a'), ds.field('a').is_valid(), {'a': 'a'}),
(Expressions.is_null('a'), ~ds.field('a').is_valid(), {'a': 'a'})
])
def test_simple(expr, dataset_filter, column_map):
translated_dataset_filter = get_dataset_filter(expr, column_map)
assert dataset_filter.equals(translated_dataset_filter)
def test_not_conversion():
expr = Expressions.not_(Expressions.greater_than('a', 1))
translated_dataset_filter = get_dataset_filter(expr, {'a': 'a'})
assert (~(ds.field("a") > 1)).equals(translated_dataset_filter)
def test_complex_expr():
expr = Expressions.or_(Expressions.and_(Expressions.greater_than('a', 1), Expressions.equal("b", "US")),
Expressions.equal("c", True))
translated_dataset_filter = get_dataset_filter(expr, {'a': 'a', 'b': 'b', 'c': 'c'})
dataset_filter = (((ds.field("a") > 1) & (ds.field("b") == "US")) | (ds.field("c") == True)) # noqa: E712
assert dataset_filter.equals(translated_dataset_filter)
``` |
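The tests above only verify that Iceberg expressions map onto equivalent `pyarrow.dataset` expressions; a brief hedged sketch of how such a translated filter would typically be consumed (the file path and column names are invented):
```python
import pyarrow.dataset as ds

# Hypothetical use of a translated filter: pyarrow applies the expression
# while scanning, so only matching rows are materialised.
dataset = ds.dataset("/tmp/events.parquet", format="parquet")
dataset_filter = (ds.field("a") > 1) & (ds.field("b") == "US")
table = dataset.to_table(filter=dataset_filter)
print(table.num_rows)
```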
{
"source": "a101269/Chinese_Semantic_Dependency_Parser_with_knowledge",
"score": 2
} |
#### File: a101269/Chinese_Semantic_Dependency_Parser_with_knowledge/evalute.py
```python
import sys
import re
import unicodedata
from argparse import ArgumentParser
INF = float('inf')
opts = None
engine = None
UNICODEPUNC = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
def stat_one_tree(lines):
stat_data = {}
for line in lines:
payload = line.strip().split("\t")
if(len(payload) < 7):
print(lines)
id_val = int(payload[0])
form_val = payload[1]
postag_val = payload[3]
head_val = payload[6]
deprel_val = payload[7]
if not opts.punctuation and engine(form_val, postag_val):
continue
if id_val not in stat_data:
stat_data[id_val] = {
"id": id_val,
"form": form_val,
"heads": [head_val],
"deprels": [deprel_val]
}
else:
assert(form_val == stat_data[id_val]["form"])
stat_data[id_val]["heads"].append(head_val)
stat_data[id_val]['deprels'].append(deprel_val)
return stat_data
def stat_one_node_heads_and_deprels(
gold_heads,
gold_deprels,
test_heads,
test_deprels):
# ! assert( len(gold_heads) == len(gold_deprels))
gold_len = len(gold_heads)
test_len = len(test_heads)
nr_right_heads = 0
nr_right_deprels = 0
nr_non_local_gold = 0
nr_non_local_test = 0
nr_non_local_right_heads = 0
nr_non_local_right_deprels = 0
    is_head_right = True  # ! default is True; this matters: if the state never changes below, the value is not set again
    is_deprel_right = True  # ! default is True
assert(gold_len != 0 and test_len != 0)
if gold_len == 1 and test_len == 1:
#! normal situation
if(gold_heads[0] == test_heads[0]):
nr_right_heads = 1
if gold_deprels[0] == test_deprels[0]:
nr_right_deprels = 1
else:
is_deprel_right = False
else:
is_head_right = False
# ! Attention . If head is wrong , deprel should be set to wrong .
is_deprel_right = False
else:
#! Non local situation
if gold_len > 1:
is_gold_non_local = True
nr_non_local_gold = gold_len
nr_non_local_test = test_len
#! for Non local , if len(test_heads) != len(gold_heads) , the node 's head and deprels is not right
if nr_non_local_gold != nr_non_local_test:
is_deprel_right = False
is_head_right = False
#! find the right non-local head and deprel
#! if has wrong head or deprel , set the `is_head_right` or `is_deprel_right` to `False`
for gold_head, gold_deprel in zip(gold_heads, gold_deprels):
if gold_head in test_heads:
nr_right_heads += 1
head_idx = test_heads.index(gold_head)
# !! head_idx == deprel_idx
if gold_deprel == test_deprels[head_idx]:
nr_right_deprels += 1
else:
is_deprel_right = False
else:
is_head_right = False
is_deprel_right = False # !
#! here no local state equals to normal state
nr_non_local_right_heads = nr_right_heads
nr_non_local_right_deprels = nr_right_deprels
return (
gold_len,
test_len,
nr_right_heads,
nr_right_deprels,
nr_non_local_gold,
nr_non_local_test,
nr_non_local_right_heads,
nr_non_local_right_deprels,
is_head_right,
is_deprel_right)
def stat_gold_and_test_data(gold_stat_data, test_stat_data):
nr_gold_rels = 0
nr_test_rels = 0
nr_head_right = 0
nr_deprel_right = 0
sentence_all_heads_is_right = True
sentence_all_deprels_is_right = True
nr_gold_non_local = 0
nr_test_non_local = 0
nr_head_non_local_right = 0
nr_deprel_non_local_right = 0
for idx in gold_stat_data.keys():
gold_node = gold_stat_data[idx]
test_node = test_stat_data[idx]
assert(gold_node['id'] == test_node['id'])
(
gold_rels_len,
test_rels_len,
nr_one_node_right_head,
nr_one_node_right_deprel,
gold_non_local_rels,
test_non_local_rels,
nr_one_node_non_local_right_head,
nr_one_node_non_local_right_deprel,
is_one_node_head_right,
is_one_node_deprel_right) = (
stat_one_node_heads_and_deprels(
gold_node['heads'],
gold_node['deprels'],
test_node['heads'],
test_node['deprels']))
nr_gold_rels += gold_rels_len
nr_test_rels += test_rels_len
nr_head_right += nr_one_node_right_head
nr_deprel_right += nr_one_node_right_deprel
nr_gold_non_local += gold_non_local_rels
nr_test_non_local += test_non_local_rels
nr_head_non_local_right += nr_one_node_non_local_right_head
nr_deprel_non_local_right += nr_one_node_non_local_right_deprel
sentence_all_heads_is_right &= is_one_node_head_right
sentence_all_deprels_is_right &= is_one_node_deprel_right
return (
nr_gold_rels,
nr_test_rels,
nr_head_right,
nr_deprel_right,
nr_gold_non_local,
nr_test_non_local,
nr_head_non_local_right,
nr_deprel_non_local_right,
sentence_all_heads_is_right,
sentence_all_deprels_is_right)
if __name__ == "__main__":
description = "Official Evaluation Script for Semeval2016 Task9 Chinese Semantic Dependency Parsing"
parser = ArgumentParser(description=description)
parser.add_argument(
"--reference",
dest="reference",
help="path to reference(gold) data",
# required=True
default='D:\研究生\Parser_with_knowledge\dataset\\result\sdp_text_test.conllu.sem16.sdp'
)
parser.add_argument(
"--answer",
dest="answer",
help="path to answer(test) data",
default='D:\研究生\graduation\\test_file\\text_eval_temp.conllu_post.sem16'
#required=True
)
parser.add_argument(
"--language",
dest="language",
default="ch",
help="specify language . 'universal' is defaulted. ")
parser.add_argument(
"--punctuation",
dest="punctuation",
default=False,
action="store_true",
help="specify to include punctuation in evaluation. default ignored")
parser.add_argument(
"--ignore",
dest="ignore",
default=None,
help="ignore form . A char is a valid ignore form . default is None .")
parser.add_argument(
"--debug",
dest="debug",
default=True,
action="store_true",
help="if set , statistic info will be output . default not set.")
opts = parser.parse_args()
if opts.language == "en":
# English punctuation list is obtained from
# http://en.wikipedia.org/wiki/Punctuation_of_English
def engine(x, y): return x in ("'", "''", # apostrophe
# brackets
"(", ")", "[", "]", "{", "}", "-LRB-", "-RRB-", "-LSB-", "-RSB-", "-LCB-", "-RCB-",
":", # colon
",", # comma
"-", "--", # dash
"...", # ellipsis
"!", # exclamation mark
".", # full stop
"\"", "``", "`", # quotation marks
";", # semicolon
"?" # question mark
) or x == opts.ignore
elif opts.language == "ch":
def engine(x, y): return x in (
"锛�", "锛�",
"銆�", "銆�", "锛�",
"锛�",
"锛�",
"鈥�", "锛�", "锛�", "锛�", "锛�",
"鈥�", "鈥�", "鈥�", "鈥�",
"銆�", "銆�", "銆�", "銆�", "銆�", "銆�", "銆�", "銆�",
"涓€涓€", "鈥曗€�", "鈥�",
) or x == opts.ignore
# elif opts.language == "universal":
# def engine(
# x, y): return len(
# x.decode("utf-8").translate(UNICODEPUNC)) == 0 or x == opts.ignore
elif opts.language == "chen2014en":
def engine(x, y): return y in set(["''", "``", ",", ".", ":"])
elif opts.language == "chen2014ch":
def engine(x, y): return y in set(['PU'])
else:
        print("Unknown language", file=sys.stderr)
        print("valid language : { universal[default] , en , ch , chen2014en , chen2014ch }", file=sys.stderr)
sys.exit(1)
reference_dataset = open(opts.reference, "r",encoding='utf-8').read().strip().split("\n\n")
answer_dataset = open(opts.answer, "r",encoding='utf-8').read().strip().split("\n\n")
assert len(reference_dataset) == len(
answer_dataset), "Number of instance unequal."
nr_total_gold_rels = 0
nr_total_test_rels = 0
nr_total_right_heads = 0
nr_total_right_deprels = 0
nr_sentence = len(reference_dataset)
nr_right_sentence_head = 0
nr_right_sentence_deprel = 0
nr_total_gold_non_local = 0
nr_total_test_non_local = 0
nr_total_right_heads_non_local = 0
nr_total_right_deprel_non_local = 0
for reference_data, answer_data in zip(reference_dataset, answer_dataset):
reference_lines = reference_data.split("\n")
answer_lines = answer_data.split("\n")
reference_lines_len = len(reference_lines)
answer_lines_len = len(answer_lines)
reference_stat_data = stat_one_tree(reference_lines)
answer_stat_data = stat_one_tree(answer_lines)
assert(len(reference_stat_data) == len(answer_stat_data))
(nr_one_gold_rels,
nr_one_test_rels,
nr_one_head_right,
nr_one_deprel_right,
nr_one_gold_non_local,
nr_one_test_non_local,
nr_one_head_non_local_right,
nr_one_deprel_non_local_right,
sentence_all_heads_is_right,
sentence_all_deprels_is_right) = stat_gold_and_test_data(reference_stat_data,
answer_stat_data)
nr_total_gold_rels += nr_one_gold_rels
nr_total_test_rels += nr_one_test_rels
nr_total_right_heads += nr_one_head_right
nr_total_right_deprels += nr_one_deprel_right
nr_total_gold_non_local += nr_one_gold_non_local
nr_total_test_non_local += nr_one_test_non_local
nr_total_right_heads_non_local += nr_one_head_non_local_right
nr_total_right_deprel_non_local += nr_one_deprel_non_local_right
if sentence_all_heads_is_right:
nr_right_sentence_head += 1
if sentence_all_deprels_is_right:
nr_right_sentence_deprel += 1
LP = float(nr_total_right_deprels) / \
nr_total_test_rels if nr_total_test_rels != 0 else INF
LR = float(nr_total_right_deprels) / \
nr_total_gold_rels if nr_total_gold_rels != 0 else INF
LF = float(2 * nr_total_right_deprels) / (nr_total_test_rels +
nr_total_gold_rels) if (nr_total_gold_rels + nr_total_test_rels) != 0 else INF
TP = nr_total_right_deprels
FP = nr_total_test_rels - nr_total_right_deprels
FN = nr_total_gold_rels - nr_total_right_deprels
NLP = float(nr_total_right_deprel_non_local) / \
nr_total_test_non_local if nr_total_test_non_local != 0 else INF
NLR = float(nr_total_right_deprel_non_local) / \
nr_total_gold_non_local if nr_total_gold_non_local != 0 else INF
NLF = float(2 * nr_total_right_deprel_non_local) / (nr_total_test_non_local +
nr_total_gold_non_local) if (nr_total_test_non_local + nr_total_gold_non_local) != 0 else INF
UP = float(nr_total_right_heads) / \
nr_total_test_rels if nr_total_test_rels != 0 else INF
UR = float(nr_total_right_heads) / \
nr_total_gold_rels if nr_total_gold_rels != 0 else INF
UF = float(2 * nr_total_right_heads) / (nr_total_test_rels + \
nr_total_gold_rels) if (nr_total_gold_rels + nr_total_test_rels) != 0 else INF
NUP = float(nr_total_right_heads_non_local) / \
nr_total_test_non_local if nr_total_test_non_local != 0 else INF
NUR = float(nr_total_right_heads_non_local) / \
nr_total_gold_non_local if nr_total_gold_non_local != 0 else INF
NUF = float(2 * nr_total_right_heads_non_local) / (nr_total_test_non_local +
nr_total_gold_non_local) if (nr_total_test_non_local + nr_total_gold_non_local) != 0 else INF
LM = float(nr_right_sentence_deprel) / \
nr_sentence if nr_sentence != 0 else INF
UM = float(nr_right_sentence_head) / \
nr_sentence if nr_sentence != 0 else INF
if opts.debug:
print("{0}{1}{0}".format("-" * 15, "statistic info"))
print("puncuation ingoring mode : {0}".format(opts.language))
print("total gold rels : {0}".format(nr_total_gold_rels))
print("total test rels : {0}".format(nr_total_test_rels))
print("total right heads : {0}".format(nr_total_right_heads))
print("total right deprels : {0}".format(nr_total_right_deprels))
print("total gold non-local : {0}".format(nr_total_gold_non_local))
print("total test non-local : {0}".format(nr_total_test_non_local))
print("total right head(non-local) : {0}".format(nr_total_right_heads_non_local))
print("total right deprels(non-local) : {0}".format(nr_total_right_deprel_non_local))
print("total sentence : {0}".format(nr_sentence))
print("total sentence with right head : {0}".format(nr_right_sentence_head))
print("total sentence with right label : {0}".format(nr_right_sentence_deprel))
print("{0}{0}{0}".format("-" * 15))
print("TP: {}, FP: {}, FN: {}".format(TP, FP, FN))
print("{0:^10}{1:^10}{2:^10}{3:^10}{4:^10}{5:^10}{6:^10}{7:^10}{8:^10}{9:^10}".format(
"LP", "LR", "LF", "NLF", "UP", "UR", "UF", "NUF", "LM", "UM"))
print("{0[0]:^10}{0[1]:^10}{0[2]:^10}{0[3]:^10}{0[4]:^10}{0[5]:^10}{0[6]:^10}{0[7]:^10}{0[8]:^10}{0[9]:^10}".format(
list(map(lambda x: "{:.2f}%".format(x * 100), [LP, LR, LF, NLF, UP, UR, UF, NUF, LM, UM]))))
```
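The labelled scores above reduce to ordinary precision/recall/F1 over predicted arcs, which is easy to check by hand; a small sketch with made-up counts:
```python
# Hand-checkable sketch of the LP/LR/LF arithmetic used above (counts invented).
nr_total_right_deprels = 80   # TP: arcs with correct head and label
nr_total_test_rels = 100      # arcs predicted by the system
nr_total_gold_rels = 90       # arcs in the gold annotation

LP = nr_total_right_deprels / nr_total_test_rels                              # 0.800
LR = nr_total_right_deprels / nr_total_gold_rels                              # 0.889
LF = 2 * nr_total_right_deprels / (nr_total_test_rels + nr_total_gold_rels)   # 0.842

# LF is exactly the harmonic mean of LP and LR.
assert abs(LF - 2 * LP * LR / (LP + LR)) < 1e-9
```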
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/inject_konwledge/clip.py
```python
import re

from glob import glob

import jieba
from tqdm import tqdm

# NOTE: lookup_table, key_word, own_kvs, own_pos, pos_map and pattern1/2/3 are
# referenced below but are not defined in this snippet; they are assumed to be
# defined elsewhere in the original module.


def conll(fr):
inject_entity_num=0
name=fr.split('/')[-1].split('.')[0]
print(name)
is_text = False
if 'text' in name:
is_text=True
fr = open(fr, 'r', encoding="utf")
fw = open('./'+name+'.conllu_ner', mode='w', encoding='utf')
sents=fr.read().split('\n\n')
sent_num=len(sents)
for sent_id,sent in enumerate(tqdm(sents)):
words=[]
postags=[]
entities=[]
boundary_ori=[]
boundary_new=[]
head0=[]
rel0=[]
rels=[]
entity_s_e=[]
lines = sent.split('\n')
for i,line in enumerate(lines):
if not line:
continue
line=line.split('\t')
words.append(line[1])
postags.append(line[3])
head0.append(line[6])
rel0.append(line[7])
rels.append(line[8])
if i==0:
boundary_ori.append(len(line[1]))
else:
boundary_ori.append(boundary_ori[-1]+len(line[1]))
ori_sent=''.join(words)
split_sent= jieba.lcut(ori_sent)
for i,token in enumerate(split_sent):
if i==0:
boundary_new.append(len(token))
else:
boundary_new.append(boundary_new[-1]+len(token))
if len(token)<2:
continue
entitie = lookup_table.get(token)
if entitie==None:
continue
entitie = ','.join(list(entitie))
entities.append((token,entitie))
            entity_s_e.append((boundary_new[-1]-len(token)+1, boundary_new[-1]))  # offsets start from 1
entity_num=0
for index, pos in enumerate(postags):
entity_info = "_"
            one_word = False  # whether the matched entity is a single word
            flag = False  # whether a matching ontology category was found
for seidx, (s,e) in enumerate(entity_s_e):
# if words[index]=='澳大利亚':
# print(s,e)
# print(boundary_ori[index])
if index==0 and e==boundary_ori[index]:
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
one_word=True
break
if not flag:
continue
entity_s_e.pop(seidx)
entities.pop(seidx)
entity_num += 1
elif index>0 and s==boundary_ori[index-1]+1 and e==boundary_ori[index]:
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
one_word = True
break
if not flag:
continue
entity_s_e.pop(seidx)
entities.pop(seidx)
entity_num += 1
elif s<=boundary_ori[index] and e>boundary_ori[index]:
# if not is_text:
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
break
if not flag:
entity_info = '搭配'#+str(entity_num)
elif s<boundary_ori[index] and e==boundary_ori[index]:
# if not is_text:
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
break
if not flag:
entity_info = '搭配'#+str(entity_num)
entity_s_e.pop(seidx)
entities.pop(seidx)
entity_num += 1
if entity_info != '搭配':
inject_entity_num+=1
if entity_info!='_':
entity_info=own_kvs.get(entity_info)
if one_word and entity_info in own_pos and pos_map.get(pos) !=own_pos[entity_info]:
entity_info='_'
if one_word and entity_info not in own_pos :
entity_info='_'
if entity_info =='' or entity_info ==None:
entity_info='_'
if re.search(pattern1, words[index]):
entity_info='数目'
elif re.search(pattern2, words[index]) or re.search(pattern3, words[index]):
entity_info='时间'
# print(entity_info+ words[index])
write_line=str(index + 1) + "\t" + words[index] + "\t" + words[index] + "\t" + pos + "\t" + pos + '\t_\t'+ head0[index]+"\t"+rel0[index]+"\t"+ rels[index] +"\t"+entity_info
# if write_line.split('\t')!=10:
# print(write_line)
# break
fw.write(write_line+"\n")
fw.write('\n')
fw.close()
print("加入知识的词数 :"+str(inject_entity_num))
files = glob('/data/private/ldq/projects/Parser_with_knowledge/dataset/*.conllu')
for file in files:
print(file)
conll(file)
```
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/inject_konwledge/inject_pku.py
```python
from tqdm import tqdm
import pkuseg
from glob import glob
fr=open('owns','r',encoding='utf')
owns=fr.read().split('\n')
print(len(owns))
key_word=[
'处所', '信息', '自然物', '鸟', '量化属性', '性格', '相对时间', '器具', '电器', '药物', '自然现象', '时段', '身份', '模糊属性', '职业', '爬行动物',
'票据', '地表', '交通工具', '抽象空间', '具体空间', '事件', '集体', '人类', '非身体构件', '时点', '外形', '专名', '昆虫', '天体', '姓名', '建筑',
'文具', '机构', '水域物', '人为事物', '化合物', '兽', '事件构件', '时间', '其他抽象事物', '非生物', '计算机软件', '理论', '人名', '地貌', '抽象事物', '化妆品',
'运动器', '动机', '气象', '创作物', '衣服', '人群', '生物', '身体构件', '方法', '空间', '庄稼', '非生物构件', '衣物', '树', '绝对时间', '可视现象',
'事性', '用具', '钱财', '矿物', '材料', '过程', '领域', '可食物', '事物', '事理', '属性', '团体', '自然事物', '排泄物', '微生物', '符号', '性能', '物性',
'计算机硬件', '食物', '作品', '人工物', '植物', '乐器', '可听现象', '技术', '泛称', '服饰', '方位', '具体事物', '构件', '外观', '所处',
'鱼', '运动器械', '生理', '运动器具', '证书', '法规', '事情', '人性', '亲属', '元素', '个人', '颜色', '地理', '动物', '心理特征', '家具', '情感', '意识',
'职业','组织机构', '组织','机构', '地名','地点' '城市', '数量', '疾病', '器械', '称谓', '时间', '职务', '工具', '手术', '车辆', '药物', '用具', '数目', '地点','公司']
pass_own=['音乐作品','网络小说','娱乐作品','言情小说','娱乐','流行音乐','小说作品','娱乐人物','歌词','出版物','书籍', '文学作品','音乐','电影','文学','软件',
'美术作品','艺术作品','美术','互联网','网站','游戏','娱乐','电视剧','科学','字词,成语,语言','词语','字词,语言']
# "字词,语言": skip when it is only a single word; two or more are added (not handled here)
# 葡萄牙人4600
# save_owns=set()
# for own in owns:
# for k in key_word:
# if k in own:
# save_owns.add(own)
def create_lookup_table():
lookup_table = {}
owns=set()
with open('../corpus/kg_9minllion', 'r', encoding='utf-8') as f:
for line in tqdm(f):
try:
subj, obje = line.strip().split("\t")
            except ValueError:
                print("[KnowledgeGraph] Bad spo:", line)
                continue  # skip malformed lines; otherwise subj/obje would be unbound below
value = []
for ob in obje.split(','):
if ob in pass_own:
value=None
break
value.append(ob)
if value==None:
continue
value=','.join(value)
# if len(value)>16:
# value=value[:16]
if value and len(subj)>1:
if subj in lookup_table.keys():
lookup_table[subj].append(value)
else:
lookup_table[subj] = [value]
return lookup_table
lookup_table = create_lookup_table()
user_dict= list(lookup_table.keys())
# user_dict='/data/private/ldq/projects/data_ner/mydict'
tokenizer = pkuseg.pkuseg(model_name="default", postag=False, user_dict=user_dict)
def conll(fr):
name=fr.split('/')[-1].split('.')[0]
print(name)
fr = open(fr, 'r', encoding="utf")
fw = open('./'+name+'.conllu_ner', mode='w', encoding='utf')
sents=fr.read().split('\n\n')
for sent in tqdm(sents):
words=[]
postags=[]
entities=[]
boundary_ori=[]
boundary_new=[]
head0=[]
rel0=[]
rels=[]
entity_s_e=[]
lines = sent.split('\n')
for i,line in enumerate(lines):
if not line:
continue
line=line.split('\t')
words.append(line[1])
postags.append(line[3])
head0.append(line[6])
rel0.append(line[7])
rels.append(line[8])
if i==0:
boundary_ori.append(len(line[1]))
else:
boundary_ori.append(boundary_ori[-1]+len(line[1]))
ori_sent=''.join(words)
split_sent = tokenizer.cut(ori_sent)
for i,token in enumerate(split_sent):
if i==0:
boundary_new.append(len(token))
else:
boundary_new.append(boundary_new[-1]+len(token))
entitie = lookup_table.get(token)
if entitie==None:
continue
entitie = ','.join(list(entitie))
entities.append((token,entitie))
            entity_s_e.append((boundary_new[-1]-len(token)+1, boundary_new[-1]))  # offsets start from 1
for index, pos in enumerate(postags):
entity_info = "_"
for seidx, (s,e) in enumerate(entity_s_e):
if index>0 and s==boundary_ori[index-1] and e==boundary_ori[index]:
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
entity_s_e.pop(seidx)
entities.pop(seidx)
break
else:
entity_info = "_"
break
elif s<=boundary_ori[index] and e>boundary_ori[index]:
flag=False
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
break
if not flag:
entity_info = '词组'+str(seidx)
# entity_info=entities[seidx][1]
elif s<boundary_ori[index] and e==boundary_ori[index]:
flag = False
for kw in key_word:
if kw in entities[seidx][1]:
entity_info= kw
flag = True
break
if not flag:
entity_info = '词组'+str(seidx)
# entity_info=entities[seidx][1]
entity_s_e.pop(seidx)
entities.pop(seidx)
fw.write(str(index + 1) + "\t" + words[index] + "\t" + words[index] + "\t" + pos + "\t" + pos + '\t_\t'+ head0[index]+"\t"+rel0[index]+"\t"+ rels[index] +"\t"+entity_info+"\n")
fw.write('\n')
# fw.write(' '.join(str(entities))+"\n")
fw.close()
if __name__ == '__main__':
owns=set()
files = glob('/data/private/ldq/projects/Parser_with_knowledge/dataset/*.conllu')
for file in files:
print(file)
conll(file)
```
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/model/joint_train.py
```python
import torch
import torch.nn as nn
from utils.utils import logger, seed_everything
from model.criterion import criterion
from model.optimizer import get_optimizer
from torch.nn.utils import clip_grad_norm_
from modules.sdp_decoder import sdp_decoder, parse_semgraph
import model.cal_las_uas as sdp_scorer
from tensorboardX import SummaryWriter
def unpack_batch(batch, use_cuda=False):
""" Unpack a batch from the data loader. """
input_ids = batch[0]
input_mask = batch[1]
segment_ids = batch[2]
boundary_ids = batch[3]
pos_ids = batch[4]
rel_ids = batch[5]
knowledge_feature = batch[6]
bio_ids = batch[1]
# knowledge_adjoin_matrix = batch[7]
# know_segment_ids = batch[6]
# know_input_ids = batch[7]
# know_input_mask = batch[8]
# knowledge_feature = (batch[6], batch[7], batch[8])
return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids#,knowledge_adjoin_matrix
class Trainer(object):
def __init__(self, args, model, batch_num=None):
self.model = model
self.use_cuda = args.use_cuda
self.device = args.device
self.fp16 = args.fp16
self.args = args
self.global_step = 0
self.batch_num = batch_num
self.optimizer, self.lr_scheduler,self.optimizer2,self.lr_scheduler2 = get_optimizer(args, batch_num, self.model)
if self.use_cuda:
self.model.cuda()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
self.model, self.optimizer,self.optimizer2 = amp.initialize(model, self.optimizer, opt_level=args.fp16_opt_level)
def train(self, train_dataloader, dev_dataloader=None, dev_conllu_file=None):
summary_writer = SummaryWriter('board_log')
seed_everything(self.args.seed)
best_las = 0
best_uas = 0
self.args.eval_interval = int(self.batch_num / 2)
logger.info(f"eval_interval:{self.args.eval_interval}")
for epoch in range(self.args.epochs):
if best_las > 0.829:
self.args.eval_interval = 300
# logger.info(f"eval_interval:{self.args.eval_interval}")
for step, batch in enumerate(train_dataloader):
self.model.train()
self.global_step += 1
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids= unpack_batch(batch, self.use_cuda)
if self.global_step==0:
dummy_input=(input_ids, segment_ids,input_mask,pos_ids,boundary_ids,knowledge_feature)
summary_writer.add_graph(self.model, (dummy_input,))
head_scores, label_scores, max_len_of_batch, ner_scores= self.model(input_ids, token_type_ids=segment_ids,
attention_mask=input_mask, pos_ids=pos_ids,
boundary_ids=boundary_ids,
knowledge_feature=knowledge_feature,
bio_ids=bio_ids)
label_target = rel_ids[:, :max_len_of_batch, :max_len_of_batch]
                head_target = label_target.ge(2).type_as(rel_ids)  # VOCAB_PREFIX = [PAD, UNK, ROOT], so the ROOT index is 2
tails = boundary_ids[:, :max_len_of_batch]
                word_mask = torch.eq(tails, 0)  # padded positions become 1
loss = criterion(head_scores, label_scores, head_target, label_target, word_mask, max_len_of_batch,
self.model.vocabs)
# if self.global_step%3!=0:
pad_zero = torch.cuda.LongTensor(bio_ids.size()[0],
max_len_of_batch) if torch.cuda.is_available() else torch.LongTensor(
bio_ids.size()[0], max_len_of_batch)
pad_zero *= 0
bio_ids = bio_ids[:, :max_len_of_batch]
bio_ids = torch.where(bio_ids >= 5, bio_ids, pad_zero)
del pad_zero
ner_loss = self.model.crf.neg_log_likelihood_loss(ner_scores, input_mask[:, :max_len_of_batch],bio_ids)
ner_loss /= float(self.args.batch_size)
summary_writer.add_scalar('ner_loss', ner_loss, self.global_step)
if ner_loss>0.5:
self.optimizer2.zero_grad()
ner_loss.backward()
self.optimizer2.step()
self.lr_scheduler2.step()
ner_scores.detach()
# logger.info('loss %s', loss)
summary_writer.add_scalar('parser_loss',loss, self.global_step)
if self.fp16:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
clip_grad_norm_(amp.master_params(self.optimizer), self.args.grad_clip_max_norm)
else:
loss.backward()
clip_grad_norm_(self.model.parameters(), self.args.grad_clip_max_norm)
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
if self.global_step % self.args.eval_interval == 0 or self.global_step==self.batch_num*self.args.epochs:
LAS, UAS = self.predict(dev_dataloader, dev_conllu_file)
if LAS > best_las:
best_las = LAS
best_uas = UAS
self.save_model()
logger.warning(
f"epoch{epoch+1}, step:{self.global_step}-----LAS:{best_las:.4f},UAS:{UAS:.4f},loss {loss.item():.4f}")
else:
logger.info(f"LAS ,UAS in epoch{epoch+1},step{step+1}:{LAS:.4f},{UAS:.4f}")
summary_writer.add_scalar('LAS', LAS, self.global_step)
summary_writer.add_scalar('UAS', UAS, self.global_step)
for i, param_group in enumerate(self.optimizer.param_groups):
summary_writer.add_scalar(f'lr/group_{i}', param_group['lr'], self.global_step)
summary_writer.close()
logger.warning(f"Result in Dev set: LAS:{best_las:.4f},UAS:{best_uas:.4f}")
def predict(self, dev_dataloader, dev_conllu_file):
predictions = []
self.model.eval()
for step, batch in enumerate(dev_dataloader):
with torch.no_grad():
preds_batch = []
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature,bio_ids= unpack_batch(
batch,self.use_cuda)
head_scores, label_scores, max_len_of_batch, ner_scores= self.model(input_ids, token_type_ids=segment_ids,
attention_mask=input_mask, pos_ids=pos_ids,
boundary_ids=boundary_ids,
knowledge_feature=knowledge_feature,
bio_ids=bio_ids)
batch_size = head_scores.size(0)
tails = boundary_ids[:, :max_len_of_batch]
                word_mask = torch.eq(tails, 0)  # padded positions become 1
weights = torch.ones(batch_size, max_len_of_batch, max_len_of_batch, dtype=torch.float,
device=self.device)
                weights = weights.masked_fill(word_mask.unsqueeze(1), 0)  # zero out padded positions
weights = weights.masked_fill(word_mask.unsqueeze(2), 0)
weights = weights.unsqueeze(3)
                # print(unlabeled_scores[0])  # very long output printed when evaluating
                head_probs = torch.sigmoid(head_scores).unsqueeze(3)  # [100, 44, 44, 1]
                label_probs = torch.softmax(label_scores, dim=3)  # [100, 44, 44, 144]
                semgraph_probs = head_probs * label_probs * weights  # decoding uses the joint probability of arc and label rather than decoding arcs first and then labels separately
                preds_batch.append(semgraph_probs.detach().cpu().numpy())  # detach() cuts off the backprop gradient flow
tail_mask = tails != 0
sentlens = torch.sum(tail_mask, 1).cpu().tolist()
semgraph = sdp_decoder(preds_batch[0], sentlens)
sents = parse_semgraph(semgraph, sentlens)
pred_sents = self.model.vocabs['rel'].parse_to_sent_batch(sents)
predictions += pred_sents
dev_conllu_file.set(['deps'], [dep for sent in predictions for dep in sent])
dev_conllu_file.write_conll(self.args.eval_temp_file)
UAS, LAS = sdp_scorer.score(self.args.eval_temp_file, self.args.gold_file)
return LAS, UAS
def save_model(self):
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
# model_to_save.save_pretrained(str(self.args.saved_model_path))
saved_model_file = self.args.saved_model_path + '/pytorch_model.bin'
torch.save(model_to_save.state_dict(), saved_model_file)
output_config_file = self.args.saved_model_path + '/config.json'
with open(str(output_config_file), 'w') as f:
f.write(model_to_save.encoder.bert_model.config.to_json_string())
```
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/modules/biaffine.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class PairwiseBilinear(nn.Module):
''' A bilinear module that deals with broadcasting for efficient memory usage.
Input: tensors of sizes (N x L1 x D1) and (N x L2 x D2)
Output: tensor of size (N x L1 x L2 x O)'''
def __init__(self, input1_size, input2_size, output_size, bias=True): # self.W_bilin = PairwiseBilinear(input1_size + 1, input2_size + 1, output_size)
super().__init__() # 7 7 7
self.input1_size = input1_size
self.input2_size = input2_size
self.output_size = output_size
self.weight = nn.Parameter(torch.Tensor(input1_size, input2_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size)) if bias else 0
def forward(self, input1, input2): # # [2*3*7],[2*3*7]
input1_size = list(input1.size())
input2_size = list(input2.size())
output_size = [input1_size[0], input1_size[1], input2_size[1], self.output_size]
# print("self.W_bilin(input1, input2 self.W_bilin(input1, input2 self.W_bilin(input1, input2")
# print(input1.view(-1, input1_size[-1]).shape)
# print(self.weight.view(-1, self.input2_size * self.output_size).shape)
# ((N x L1) x D1) * (D1 x (D2 x O)) -> (N x L1) x (D2 x O)
        # in1: 6*7 / 3100*401; in1 * (in2 * out) dims: 7*49 / 401*56140
        intermediate = torch.mm(input1.view(-1, input1_size[-1]), self.weight.view(-1, self.input2_size * self.output_size))  # mm: matrix multiplication
# (N x L2 x D2) -> (N x D2 x L2)
        input2 = input2.transpose(1, 2)  # transpose -> 2*7*3
        # (N x (L1 x O) x D2) * (N x D2 x L2) -> (N x (L1 x O) x L2)
        output = intermediate.view(input1_size[0], input1_size[1] * self.output_size, input2_size[2]).bmm(input2)  # bmm: batched matrix multiplication
# (N x (L1 x O) x L2) -> (N x L1 x L2 x O)
output = output.view(input1_size[0], input1_size[1], self.output_size, input2_size[1]).transpose(2, 3)
        # torch.Size([73, 50, 50, 1]): whether each pair is a head arc; torch.Size([73, 50, 50, 140]): scores for 140 labels
return output
class BiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
        self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size)  # applies a bilinear transformation to the incoming data: y = x1 * W * x2 + b
        # +1 because a constant 1 is concatenated to the last dimension of each input before this is called, which increases that dimension by 1
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
# print(input1)
# print(input1.size())
        input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)], len(input1.size())-1)  # cat accepts a tuple or a list (any python sequence of tensors) as its first argument
        input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)], len(input2.size())-1)  # why concatenate? appending a constant 1 lets the bilinear form learn bias terms
return self.W_bilin(input1, input2) # 拼接结果最后一维 即hidden维度所在层次增加了一位 1
class PairwiseBiaffineScorer(nn.Module): # 6 6 7
def __init__(self, input1_size, input2_size, output_size):
super().__init__() # 7 7 7
        self.W_bilin = PairwiseBilinear(input1_size + 1, input2_size + 1, output_size)  # these sizes are the last (hidden_size) dimension of the input data
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
# unlabeled(lstm_outputs,lstm_outputs)
    def forward(self, input1, input2):  # [2*3*6], [2*3*6]; slicing with [:-1] excludes the last index, i.e. drops the last dimension
        input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)], len(input1.size())-1)  # [2*3*7]: [2*3*6] concatenated with [2*3*1] along the last dimension
        input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)], len(input2.size())-1)  # [2*3*7]
# a=self.W_bilin(input1, input2)
        # print(a.shape)  # torch.Size([73, 50, 50, 1]) / torch.Size([73, 50, 50, 140]): is-head scores and scores for 140 labels
return self.W_bilin(input1, input2)
class DeepBiaffineScorer(nn.Module): # self.deprel = DeepBiaffineScorer(2 * self.args['hidden_dim'], 2 * self.args['hidden_dim'], self.args['deep_biaff_hidden_dim'], len(vocab['graph']), pairwise=True, dropout=args['dropout'])
# 4 5 6 7
def __init__(self, input1_size, input2_size, hidden_size, output_size, hidden_func=F.relu, dropout=0, pairwise=True):
super().__init__()
self.W1 = nn.Linear(input1_size, hidden_size)
self.W2 = nn.Linear(input2_size, hidden_size)
self.hidden_func = hidden_func
if pairwise: # 6 6 7
self.scorer = PairwiseBiaffineScorer(hidden_size, hidden_size, output_size)
else:
self.scorer = BiaffineScorer(hidden_size, hidden_size, output_size)
self.dropout = nn.Dropout(dropout)
    def forward(self, input1, input2):  # scores are computed from dropout(hidden_func(linear(input))) of both inputs
        # print(input1)  # 2*3*6 2*3*6
return self.scorer(self.dropout(self.hidden_func(self.W1(input1))), self.dropout(self.hidden_func(self.W2(input2))))
if __name__ == "__main__":
x1 = torch.randn(2, 3, 4)
x2 = torch.randn(2, 3, 5)
scorer = DeepBiaffineScorer(4, 5, 6, 7)
# print(scorer(x1, x2))
res = scorer(x1, x2)
print(res)
# print(res.size())
```
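Since `PairwiseBilinear` documents its output as (N x L1 x L2 x O), the `__main__` block above can be turned into a quick shape check; a hedged sketch:
```python
import torch

# Shape sanity check for DeepBiaffineScorer as defined above: inputs of sizes
# (N=2, L1=3, D1=4) and (N=2, L2=3, D2=5) with output_size=7 should yield one
# score vector per (i, j) token pair, i.e. a (2, 3, 3, 7) tensor.
x1 = torch.randn(2, 3, 4)
x2 = torch.randn(2, 3, 5)
scorer = DeepBiaffineScorer(4, 5, 6, 7)
out = scorer(x1, x2)
assert out.size() == torch.Size([2, 3, 3, 7])
```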
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/sequence_tagging/trans_bio.py
```python
from glob import glob
def trans_to_bio(fr):
inject_entity_num=0
name=fr.split('/')[-1].split('.')[0]
print(name)
fr = open(fr, 'r', encoding="utf")
fw = open('./'+name+'.conllu_bio', mode='w', encoding='utf')
sents=fr.read().split('\n\n')
sent_num=len(sents)
for sent_id,sent in enumerate(sents):
words=[]
postags=[]
entities=[]
head0=[]
rel0=[]
rels=[]
pre_label=None
lines = sent.split('\n')
for i,line in enumerate(lines):
if not line:
continue
line=line.split('\t')
words.append(line[1])
postags.append(line[3])
head0.append(line[6])
rel0.append(line[7])
rels.append(line[8])
entities.append(line[9])
sent_bio=[]
for index, entity in enumerate(entities):
if entity !=pre_label and index!=len(entities)-1:
pre_label=entity
if entity!='_' and entity!='搭配':
sent_bio.append( 'B-' +entity)
else:
sent_bio.append('O')
elif entity ==pre_label:
if entity!='_' and entity!='搭配':
sent_bio.append( 'I-' +entity)
else:
sent_bio.append('O')
elif index==len(entities)-1:
if entity!='_' and entity!='搭配':
sent_bio.append( 'I-' +entity)
else:
sent_bio.append('O')
# print(sent_bio)
for index, entity in enumerate(entities):
write_line=str(index + 1) + "\t" + words[index] + "\t" + words[index] + "\t" + postags[index] + "\t" + postags[index] + "\t"+ sent_bio[index]+"\t"+ head0[index]+"\t"+rel0[index]+"\t"+ rels[index] +'\t'+entities[index]
fw.write(write_line+"\n")
fw.write('\n')
fw.close()
if __name__ == '__main__':
files = glob('./*.conllu_ner')
for file in files:
print(file)
trans_to_bio(file)
```
#### File: Chinese_Semantic_Dependency_Parser_with_knowledge/utils/batch_trans_sem16.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from glob import glob
def conllu_to_sem16(conllu_filename):
with open(conllu_filename, encoding='utf-8') as f, open(conllu_filename+'.sem16', 'w', encoding='utf-8') as g:
buff = []
for line in f:
line = line.strip('\n')
items = line.split('\t')
if len(items) == 10:
# Add it to the buffer
buff.append(items)
elif buff:
for i, items in enumerate(buff):
if items[8] != '_':
nodes = items[8].split('|')
for node in nodes:
words = items
# copy xpos to upos
words[3] = words[4]
node = node.split(':', 1)
node[0] = int(node[0])
words[6], words[7], words[8] = str(node[0]), node[1], '_'
g.write('\t'.join(words) + '\n')
else:
g.write('\t'.join(items) + '\n')
g.write('\n')
buff = []
#***************************************************************
if __name__ == '__main__':
files = glob('dataset/*for_sdp.conllu')
print(files)
for file in files:
conllu_to_sem16(file)
``` |
{
"source": "A1014280203/ActionReClient",
"score": 3
} |
#### File: A1014280203/ActionReClient/Model.py
```python
from PyQt5.QtCore import QObject, pyqtSignal
class Model(QObject):
    # signal emitted when the data is updated
dataChangedSignal = pyqtSignal()
def __init__(self, logger):
super(Model, self).__init__()
self.__data = None
self.logger = logger
def setData(self, data):
self.__data = data
        # data update finished, notify the view
self.run()
# log example
self.logger.info('from {0}'.format(self.__class__.__name__))
def getData(self):
return self.__data
def run(self):
        '''Emit the data-changed signal.
        :return: None
        '''
self.dataChangedSignal.emit()
if __name__ == '__main__':
pass
```
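A hedged wiring sketch for the model above: any callable connected to `dataChangedSignal` runs when `setData` is called. A plain stdlib logger stands in for whatever the application injects, and in a real GUI the `QApplication` event loop would already exist.
```python
# Hypothetical wiring of the Model defined above.
import logging

logger = logging.getLogger("demo")
model = Model(logger)

# The connected slot fires every time setData() triggers run().
model.dataChangedSignal.connect(lambda: print("view refresh:", model.getData()))
model.setData({"frame": 1})
```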
#### File: A1014280203/ActionReClient/Utils.py
```python
import requests
import numpy as np
import os
import cv2
import json
import time
capture = cv2.VideoCapture(0)
def fetchData():
resp = requests.get('')
if resp.status_code == 200:
pose = np.load('./1.npy', allow_pickle=True)[0, :2, 0, :].transpose(1, 0)
return {'img_b': resp.content,
'pose': pose,
'boundingBox': [39, 917, 336, 886],
'nameAndAction': ['葛某', '走']}
else:
return None
def getLocalData():
imgs = os.listdir('./data/20_imga/')
poses = np.load('./data/20.npy', allow_pickle=True)
i = 0
actions = ["唱", "跳", "rap", "篮球"]
def iner():
nonlocal imgs, i, poses
if i >= len(imgs):
i = 0
with open('./data/20_imga/' + imgs[i], 'rb') as fr:
img_b = fr.read()
x, y, w, h = where2Cut(poses[0, :2, i, :])
pose = poses[0, :2, i, :].transpose(1, 0)
i += 1
return {'img_b': img_b,
'pose': pose,
'boundingBox': [[y, x, w, h]],
'nameAndAction': [['葛某', actions[int((i/5)) % 4]], ["蔡某", actions[int((i/5+1)) % 4]]]}
return iner
def where2Cut(a: [[], []]):
x, y = int(min(a[0]) * 1920), int(min(a[1]) * 1080)
w = int((max(a[0]) - min(a[0])) * 1920)
h = int((max(a[1]) - min(a[1])) * 1080)
offsetX = 100
offsetY = 50
x = x - offsetX if x - offsetX > 0 else 0
y = y - offsetY if y - offsetY > 0 else 0
w = w + 2 * offsetX if x + w + 2 * offsetX < 1920 else 0
h = h + 2 * offsetY if y + h + 2 * offsetY < 1080 else 0
w, h = max(w, h), max(w, h)
return x, y, w, h
class BehaviorRecord(object):
__records = dict()
__time = 0
__fps = 0
@classmethod
def getRecords(cls):
return cls.__records
@classmethod
def setFps(cls, fps):
cls.__fps = fps
@classmethod
def lastActionOf(cls, name: str):
if name not in cls.__records:
return None
return cls.__records[name][-1]["action"]
@classmethod
def record(cls, nameAndAction: [[], ]):
"""
records = {
"name": [
{
"action": "唱",
"start": 0,
"end": 100
},
],
}
"""
for n, a in nameAndAction:
lastAction = cls.lastActionOf(n)
if lastAction is None:
cls.__records[n] = [{"action": a, "start": cls.__time, "end": cls.__time}, ]
elif lastAction == a:
cls.__records[n][-1]["end"] = cls.__time
else:
cls.__records[n].append({"action": a, "start": cls.__time, "end": cls.__time})
cls.__time += 1
@classmethod
def theDurationOf(cls, d: dict):
"""
:param d: {
"action": "唱",
"start": 0,
"end": 100
}
:return:
"""
start, end = d["start"], d["end"]
if cls.__fps == 0:
return "0秒"
d = (end - start)/cls.__fps
if d > 3600:
return f"{int(d//3600)}小时"
elif d > 60:
return f"{int(d//60)}分"
else:
return f"{int(d)}秒"
# for test
fetchData = getLocalData()
# get_data = lambda: None
if __name__ == '__main__':
while capture.isOpened():
        # grab one frame
ret, frame = capture.read()
print(capture.get(5))
if ret:
cv2.imshow('frame', frame)
else:
break
if cv2.waitKey(1) == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
``` |
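The `BehaviorRecord` bookkeeping above can be exercised without a camera or model; a hedged sketch using a name that already appears in this file and a nominal frame rate:
```python
# Hypothetical exercise of BehaviorRecord as defined above.
BehaviorRecord.setFps(25)

# Each record() call advances the internal frame counter by one.
for _ in range(50):                     # ~2 seconds of "走"
    BehaviorRecord.record([["葛某", "走"]])
for _ in range(25):                     # ~1 second of "跳"
    BehaviorRecord.record([["葛某", "跳"]])

for entry in BehaviorRecord.getRecords()["葛某"]:
    print(entry["action"], BehaviorRecord.theDurationOf(entry))
```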
{
"source": "A1014280203/weread",
"score": 3
} |
#### File: A1014280203/weread/model.py
```python
from sqlalchemy import Column, String, create_engine, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine("mysql+pymysql://user:password@ip:port/weread?charset=utf8")
DBSession = sessionmaker(bind=engine)
class Post(Base):
__tablename__ = "post"
# post info
pid = Column(Integer(), autoincrement=True, index=True, unique=True)
originalId = Column(String(128), primary_key=True)
createTime = Column(Integer())
doc_url = Column(String(256))
title = Column(String(128))
content = Column(String(256))
state = Column(Integer(), default=0)
# mp info
mp_name = Column(String(64))
avatar = Column(String(256))
bookId = Column(String(128))
class Book(Base):
__tablename__ = "book"
bid = Column(Integer(), primary_key=True)
bookId = Column(String(128))
share_url = Column(String(256))
state = Column(Integer(), default=0)
last_update = Column(Integer(), default=0)
class DBC(object):
def __init__(self):
self.__s = None
self.refresh_session()
def __del__(self):
self.close_session()
def refresh_session(self):
self.__s = DBSession()
def close_session(self):
if self.__s:
self.__s.close()
self.__s = None
def add(self, obj):
if self.__s is None:
self.refresh_session()
self.__s.add(obj)
def __pre_check(self, data: dict):
"""
:param data: {field: value}
:return: True for pass
"""
pass
def update(self, cls, col, new_d):
if self.__s is None:
self.refresh_session()
self.__s.query(cls).filter(getattr(cls, col) == new_d[col]).update(new_d)
def query_all(self, cls):
return self.__s.query(cls).all()
def commit(self):
"""
documented as bad form
:return:
"""
try:
self.__s.commit()
except Exception as e:
self.__s.rollback()
return False
return True
@staticmethod
def orm2dict(rows: list):
"""
:param rows: must be iterable
:return:
"""
if not len(rows):
return rows
cols = [x.name for x in rows[0].__mapper__.columns]
data = []
for row in rows:
_d = {name: getattr(row, name) for name in cols}
data.append(_d)
return data
def query_all_pretty(self, cls):
rows = self.query_all(cls)
return self.orm2dict(rows)
``` |
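A hedged usage sketch for the `DBC` helper above; it assumes the `create_engine` URL has been pointed at a reachable MySQL instance (the placeholder DSN in this file will not connect as-is) and that the tables exist or are created first.
```python
# Hypothetical round trip through DBC as defined above.
Base.metadata.create_all(engine)    # create the `post` / `book` tables if missing

dbc = DBC()
dbc.add(Book(bid=1, bookId="b-001", share_url="https://example.com/share/b-001"))
if dbc.commit():
    for row in dbc.query_all_pretty(Book):
        print(row["bookId"], row["state"])
dbc.close_session()
```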
{
"source": "a1026360/alpha-zero-general",
"score": 3
} |
#### File: environment/keras/NNet.py
```python
import os
import time
import numpy as np
import sys
from utils import *
from NeuralNet import NeuralNet
from .ChessNNet import ChessNNet as onnet
sys.path.append('..')
args = TrainingConfig({
'lr': 0.005,
'dropout': 0.2,
'epochs': 14,
'batch_size': 16,
'cuda': True,
'num_channels': 256,
})
class NNetWrapper(NeuralNet):
def __init__(self, game):
self.nnet = onnet(game, args)
self.board_x, self.board_y = game.getBoardSize()
self.action_size = game.getActionSize()
def train(self, examples):
"""
examples: list of examples, each example is of form (board, pi, v)
"""
input_boards, target_pis, target_vs = list(zip(*examples))
input_boards = np.asarray(input_boards)
target_pis = np.asarray(target_pis)
target_vs = np.asarray(target_vs)
self.nnet.model.fit(x=input_boards, y=[target_pis, target_vs], batch_size=args.batch_size, epochs=args.epochs, verbose=2)
def predict(self, board):
"""
board: np array with board
"""
# timing
#start = time.time()
# preparing input
board = board[np.newaxis, :, :]
# run
pi, v = self.nnet.model.predict(board)
#print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))
return pi[0], v[0]
def save_checkpoint(self, folder='checkpoint', filename='checkpoint.h5'):
filepath = os.path.join(folder, filename)
if not os.path.exists(folder):
print(f"Checkpoint Directory does not exist! Making directory {folder}")
os.mkdir(folder)
else:
print(f"Checkpoint Directory exists (at '{os.path.abspath(filepath)}').")
self.nnet.model.save_weights(filepath)
def load_checkpoint(self, folder='checkpoint', filename='checkpoint.h5'):
filepath = os.path.join(folder, filename)
if not os.path.exists(filepath):
print(f"No model in path '{os.path.abspath(filepath)}'!")
raise Exception("No model in path!")
self.nnet.model.load_weights(filepath)
``` |
{
"source": "a1043332/sc_api",
"score": 2
} |
#### File: a1043332/sc_api/noxfile.py
```python
import nox
test_dependencies = [
"google-auth",
"google-auth-httplib2",
"mox",
"parameterized",
"pyopenssl",
"pytest",
"pytest-cov",
"webtest",
"coverage",
"unittest2",
"mock",
]
@nox.session(python=["3.7"])
def lint(session):
session.install("flake8")
session.run(
"flake8",
"googleapiclient",
"tests",
"--count",
"--select=E9,F63,F7,F82",
"--show-source",
"--statistics",
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
@nox.parametrize(
"oauth2client",
[
"oauth2client<2dev",
"oauth2client>=2,<=3dev",
"oauth2client>=3,<=4dev",
"oauth2client>=4,<=5dev",
],
)
def unit(session, oauth2client):
session.install(*test_dependencies)
session.install(oauth2client)
if session.python < "3.0":
session.install("django<2.0.0")
else:
session.install("django>=2.0.0")
session.install('.')
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=googleapiclient",
"--cov=tests",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=85",
"tests",
*session.posargs,
)
```
#### File: samples/searchconsole/sqlInsertt.py
```python
import mysql.connector
from mysql.connector import Error
def sqlInsert(sql):
    connection = None
    try:
connection = mysql.connector.connect(
host='localhost',
database='SC',
user='hdd',
password='<PASSWORD>')
if connection.is_connected():
db_Info = connection.get_server_info()
print("version", db_Info)
cursor = connection.cursor()
cursor.execute(sql)
connection.commit()
except Error as e:
print("error", e)
finally:
        if connection is not None and connection.is_connected():
cursor.close()
connection.close()
print("closed")
``` |
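A hedged call sketch for the helper above; the table and column names are invented. Since `sqlInsert` executes whatever string it receives, parameterised queries inside the helper would be the safer pattern for untrusted values.
```python
# Hypothetical call of sqlInsert as defined above (table/columns invented).
sqlInsert(
    "INSERT INTO clicks (page, country, impressions) "
    "VALUES ('/home', 'TW', 120)"
)
```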
{
"source": "a10networks/a10-cliconf-collection",
"score": 2
} |
#### File: plugins/modules/acos_facts.py
```python
from __future__ import (absolute_import, division, print_function)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: acos_facts
author: <NAME> (@hthompson6), <NAME> (@OmkarTelee-A10),
<NAME> (@afrin-chakure-a10), <NAME> (@NehaKembalkarA10)
short_description: Collect facts from remote devices running A10 ACOS
description:
- Collects a base set of device facts from a remote device that
is running ACOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will collect a base set of facts from the device
and can enable or disable collection of additional facts.
version_added: '2.9'
options:
gather_subset:
description:
- When supplied, this argument restricts the facts collected
to a given subset.
- Possible values for this argument include
all, default, hardware, config and interfaces
      - Specify a list of comma-separated values (without spaces) to include
a larger subset.
required: false
type: list
default: 'all'
partition:
description:
- This argument is used to specify the partition name from
which you want to collect respective facts.
type: str
default: shared
notes:
- Tested against ACOS 4.1.1-P9
'''
EXAMPLES = r'''
tasks:
- name: Collect all the facts
a10.acos_cli.acos_facts:
gather_subset: all
- name: Collect only the config and default facts
a10.acos_cli.acos_facts:
gather_subset:
- config
- name: Do not collect hardware facts
a10.acos_cli.acos_facts:
gather_subset:
- "!hardware"
- name: Collect all the facts my_partition
a10.acos_cli.acos_facts:
partition: my_partition
gather_subset: all
'''
RETURN = r'''
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_hostid:
description: The hostid returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_image:
description: The image file the device is running
returned: always
type: str
ansible_net_api:
description: The name of the transport
returned: always
type: str
ansible_net_python_version:
description: The Python version Ansible controller is using
returned: always
type: str
# hardware
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
'''
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.a10.acos_cli.plugins.module_utils.network.a10.acos import \
run_commands
from ansible_collections.a10.acos_cli.plugins.module_utils.network.a10.facts.facts import \
Facts
class FactsArgs(object):
""" The arg spec for the acos_facts module """
argument_spec = {
'gather_subset': dict(default=['all'], type='list'),
'partition': dict(default='shared')
}
def main():
""" Main entry point for AnsibleModule """
argument_spec = FactsArgs.argument_spec
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if module.params['partition'].lower() != 'shared':
partition = module.params['partition']
out = run_commands(module, 'active-partition %s' % (partition))
if "does not exist" in str(out[0]):
module.fail_json(msg="Provided partition does not exist")
warnings = []
ansible_facts, additional_warnings = Facts(module).get_facts()
warnings.extend(additional_warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
```
#### File: network/a10/test_acos_config.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from mock import MagicMock, Mock
import os
from ansible_collections.a10.acos_cli.plugins.cliconf.acos import Cliconf
from ansible_collections.a10.acos_cli.plugins.modules import acos_config
from ansible_collections.a10.acos_cli.tests.unit.compat.mock import patch
from ansible_collections.a10.acos_cli.tests.unit.modules.utils import AnsibleFailJson
from ansible_collections.a10.acos_cli.tests.unit.modules.utils import set_module_args
from ansible_collections.a10.acos_cli.tests.unit.modules.network.a10.base import (
TestAcosModule, load_fixture)
class TestAcosConfigModule(TestAcosModule):
module = acos_config
def setUp(self):
super(TestAcosConfigModule, self).setUp()
self.mock_get_config = patch(
"ansible_collections.a10.acos_cli.plugins.modules.acos_config.get_config"
)
self.get_config = self.mock_get_config.start()
self.mock_get_connection = patch(
"ansible_collections.a10.acos_cli.plugins.modules.acos_config.get_connection"
)
self.get_connection = self.mock_get_connection.start()
self.conn = self.get_connection()
self.conn.edit_config = MagicMock()
self.conn.get_diff = MagicMock()
self.mock_run_commands = patch(
"ansible_collections.a10.acos_cli.plugins.modules.acos_config.run_commands"
)
self.run_commands = self.mock_run_commands.start()
self.src = os.path.join(os.path.dirname(
__file__), 'fixtures/show_config_file_commands.cfg')
self.backup_spec = {
"filename": "test_backup.cfg",
"dir_path": "fixtures/backup/"
}
self.cliconf_obj = Cliconf(MagicMock())
self.running_config = load_fixture("acos_running_config.cfg")
self.match = 'line'
self.diff_ignore_lines = 'none'
def tearDown(self):
super(TestAcosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_run_commands.stop()
self.mock_get_connection.stop()
def load_fixtures(self, filename=None):
config_file = "acos_running_config.cfg"
self.get_config.return_value = load_fixture(config_file)
self.get_connection.edit_config.return_value = None
def test_acos_config_lines(self):
lines = ["ip dns primary 10.18.18.81"]
set_module_args(dict(lines=lines))
self.execute_module()
self.conn.get_diff = Mock(
return_value=self.cliconf_obj.get_diff(
candidate=lines, running=self.running_config, diff_match=self.match,
diff_ignore_lines=self.diff_ignore_lines
)
)
self.assertIn("ip dns primary 10.18.18.81",
self.conn.get_diff.return_value['config_diff'])
self.assertTrue(self.conn.edit_config.called)
def test_acos_config_multi_lines(self):
lines = ["ip dns primary 10.18.18.81", "member rs1-test 80",
"slb server server2-test 172.16.31.10"]
set_module_args(dict(lines=lines))
self.execute_module()
self.conn.get_diff = Mock(
return_value=self.cliconf_obj.get_diff(
candidate=lines, running=self.running_config, diff_match=self.match,
diff_ignore_lines=self.diff_ignore_lines
)
)
self.assertIn("ip dns primary 10.18.18.81",
self.conn.get_diff.return_value['config_diff'])
self.assertIn("member rs1-test 80",
self.conn.get_diff.return_value['config_diff'])
self.assertIn("slb server server2-test 172.16.31.10",
self.conn.get_diff.return_value['config_diff'])
self.assertTrue(self.conn.edit_config.called)
def test_acos_config_before(self):
lines = ["ip dns primary 10.18.18.19"]
set_module_args(dict(lines=lines, before=["show avcs"]))
self.execute_module()
commands = ["show avcs", "ip dns primary 10.18.18.19"]
self.conn.get_diff = Mock(
return_value=self.cliconf_obj.get_diff(
candidate=commands, running=self.running_config, diff_match=self.match,
diff_ignore_lines=self.diff_ignore_lines
)
)
self.assertIn("ip dns primary 10.18.18.19",
self.conn.get_diff.return_value['config_diff'])
self.assertIn(
"show avcs", self.conn.get_diff.return_value['config_diff'])
self.assertTrue(self.conn.edit_config.called)
def test_acos_config_after(self):
lines = ["ip dns primary 10.18.18.19"]
set_module_args(dict(lines=lines, after=["show avcs"]))
self.execute_module()
commands = ["ip dns primary 10.18.18.19", "show avcs"]
self.conn.get_diff = Mock(
return_value=self.cliconf_obj.get_diff(
candidate=commands, running=self.running_config, diff_match=self.match,
diff_ignore_lines=self.diff_ignore_lines
)
)
self.assertIn("ip dns primary 10.18.18.19",
self.conn.get_diff.return_value['config_diff'])
self.assertIn(
"show avcs", self.conn.get_diff.return_value['config_diff'])
self.assertTrue(self.conn.edit_config.called)
def test_acos_config_save_changed_false(self):
set_module_args(dict(save_when="changed"))
self.execute_module()
self.assertEqual(self.run_commands.call_count, 3)
self.assertEqual(self.conn.edit_config.call_count, 1)
args = self.run_commands.call_args_list
commands = [x[0][1] for x in args]
self.assertNotIn("write memory\r", commands)
def test_acos_config_save_always(self):
lines = ["ip dns primary 10.18.18.19"]
set_module_args(dict(lines=lines, save_when="always"))
self.execute_module()
self.assertEqual(self.run_commands.call_count, 4)
self.assertEqual(self.conn.edit_config.call_count, 1)
args = self.run_commands.call_args_list
commands = [x[0][1] for x in args]
self.assertIn("write memory\r", commands)
@patch("ansible_collections.a10.acos_cli.plugins.modules.acos_config.NetworkConfig")
def test_acos_config_save_no_modified(self, mock_networkConfig):
lines = ["ip dns primary 10.18.18.39"]
set_module_args(dict(lines=lines, save_when="modified"))
self.execute_module()
args = self.run_commands.call_args_list[-1][0][1]
self.assertEqual(args, ['show running-config', 'show startup-config'])
self.assertEqual(mock_networkConfig.call_count, 3)
commands = [x[0][1] for x in self.run_commands.call_args_list]
self.assertNotIn("write memory\r", commands)
@patch("ansible_collections.a10.acos_cli.plugins.modules.acos_config.NetworkConfig")
def test_acos_config_save_modified(self, mock_networkConfig):
running_config_fixture = Mock()
running_config_fixture.sha1 = "show running_config fixtures"
startup_config_fixture = Mock()
startup_config_fixture.sha1 = "show startup_config fixtures"
mock_networkConfig.side_effect = [
running_config_fixture, startup_config_fixture]
set_module_args(dict(save_when="modified"))
self.execute_module()
args = self.run_commands.call_args_list
commands = [x[0][1] for x in args]
self.assertIn("write memory\r", commands)
def test_acos_config_src(self):
set_module_args(dict(src=self.src))
self.execute_module()
self.assertTrue(self.conn.edit_config.called)
def test_acos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn("__backup__", result)
@patch("ansible_collections.a10.acos_cli.plugins.modules.acos_config.run_commands")
def test_acos_config_in_existing_partition(self, mock_partition):
fixture = [load_fixture("acos_config_show_partition.cfg")]
mock_partition.return_value = fixture
partition_name = 'my_partition'
set_module_args(dict(partition=partition_name))
self.execute_module()
second_args = [calls[0][1]
for calls in mock_partition.call_args_list]
self.assertIn('active-partition my_partition', second_args)
@patch("ansible_collections.a10.acos_cli.plugins.modules.acos_config.run_commands")
def test_acos_config_partition_does_not_exist(self, mock_partition):
fixture = [load_fixture(
"acos_config_active-partition_my_partition3.cfg")]
mock_partition.return_value = fixture
partition_name = 'my_partition3'
set_module_args(dict(partition=partition_name))
        with self.assertRaises(AnsibleFailJson) as exc:
            self.execute_module()
        result = exc.exception.args[0]
        self.assertIn('Provided partition does not exist', result['msg'])
def test_acos_config_match_exact(self):
lines = ["ip dns primary 10.18.18.81"]
set_module_args(dict(lines=lines, match="exact"))
self.execute_module()
self.conn.get_diff.assert_called_with(
candidate='ip dns primary 10.18.18.81', diff_ignore_lines=None,
diff_match='exact', running=self.running_config)
def test_acos_config_match_strict(self):
lines = ["ip dns primary 10.18.18.81"]
set_module_args(dict(lines=lines, match="strict"))
self.execute_module()
self.conn.get_diff.assert_called_with(
candidate='ip dns primary 10.18.18.81', diff_ignore_lines=None,
diff_match='strict', running=self.running_config)
def test_acos_config_match_none(self):
lines = ["ip dns primary 10.18.18.81"]
set_module_args(dict(lines=lines, match="none"))
self.execute_module()
self.conn.get_diff.assert_called_with(
candidate='ip dns primary 10.18.18.81', diff_ignore_lines=None,
diff_match='none', running=self.running_config)
``` |
{
"source": "a10networks/a10-nlbaas2oct",
"score": 2
} |
#### File: a10-nlbaas2oct/a10_nlbaas2oct/db_utils.py
```python
import oslo_i18n as i18n
from oslo_log import log as logging
_translators = i18n.TranslatorFactory(domain='a10_nlbaas2oct')
# The primary translation function using the well-known name "_"
_ = _translators.primary
LOG = logging.getLogger('a10_nlbaas2oct')
def lock_loadbalancer(n_session, lb_id):
# Lock the load balancer in neutron DB
result = n_session.execute(
"UPDATE lbaas_loadbalancers SET "
"provisioning_status = 'PENDING_UPDATE' WHERE id = :id AND "
"provisioning_status = 'ACTIVE';", {'id': lb_id})
if result.rowcount != 1:
raise Exception(_('Load balancer is not provisioning_status '
'ACTIVE'))
def unlock_loadbalancer(n_session, lb_id):
# Unlock the load balancer in neutron DB
result = n_session.execute(
"UPDATE lbaas_loadbalancers SET "
"provisioning_status = 'ACTIVE' WHERE id = :id AND "
"provisioning_status = 'PENDING_UPDATE';", {'id': lb_id})
def get_loadbalancer_ids(n_session, conf_lb_id_list=[], conf_project_id=None, conf_all=False):
lb_id_list = []
if conf_lb_id_list:
for conf_lb_id in conf_lb_id_list:
lb_id = n_session.execute(
"SELECT id FROM neutron.lbaas_loadbalancers WHERE "
"id = :id AND provisioning_status = 'ACTIVE';",
{'id': conf_lb_id}).fetchall()
if not lb_id:
lb_id = n_session.execute(
"SELECT id FROM neutron.lbaas_loadbalancers WHERE id = :id;",
{'id': conf_lb_id}).fetchall()
if lb_id:
error_msg = ('Loadbalancer with ID {} not '
'in provisioning state ACTIVE. ').format(conf_lb_id)
else:
error_msg = ('Loadbalancer with ID {} could not be found. '
'Please ensure you are using the UUID '
'instead of the name.').format(conf_lb_id)
raise Exception(_(error_msg))
lb_id_list.append(lb_id[0])
elif conf_project_id:
lb_id_list = n_session.execute(
"SELECT id FROM neutron.lbaas_loadbalancers WHERE "
"project_id = :id AND provisioning_status = 'ACTIVE';",
{'id': conf_project_id}).fetchall()
elif conf_all:
lb_id_list = n_session.execute(
"SELECT id FROM neutron.lbaas_loadbalancers WHERE "
"provisioning_status = 'ACTIVE';").fetchall()
return lb_id_list
def get_loadbalancer_entry(n_session, lb_id):
# Get the load balancer record from neutron
n_lb = n_session.execute(
"SELECT b.provider_name, a.project_id, a.name, a.description, "
"a.admin_state_up, a.operating_status, a.flavor_id, "
"a.vip_port_id, a.vip_subnet_id, a.vip_address "
"FROM lbaas_loadbalancers a JOIN providerresourceassociations b "
"ON a.id = b.resource_id WHERE ID = :id;",
{'id': lb_id}).fetchone()
return n_lb
def get_listeners_and_stats_by_lb(n_session, lb_id):
lb_stats = n_session.execute(
"SELECT bytes_in, bytes_out, active_connections, "
"total_connections FROM lbaas_loadbalancer_statistics WHERE "
"loadbalancer_id = :lb_id;", {'lb_id': lb_id}).fetchone()
listeners = n_session.execute(
"SELECT id, name, description, protocol, protocol_port, "
"connection_limit, default_pool_id, admin_state_up, "
"provisioning_status, operating_status, "
"default_tls_container_id FROM lbaas_listeners WHERE "
"loadbalancer_id = :lb_id;", {'lb_id': lb_id}).fetchall()
return listeners, lb_stats
def get_SNIs_by_listener(n_session, listener_id):
SNIs = n_session.execute(
"SELECT tls_container_id, position FROM lbaas_sni WHERE "
"listener_id = :listener_id;", {'listener_id': listener_id}).fetchall()
return SNIs
def get_l7policies_by_listener(n_session, listener_id):
l7policies = n_session.execute(
"SELECT id, name, description, listener_id, action, "
"redirect_pool_id, redirect_url, position, "
"provisioning_status, admin_state_up FROM "
"lbaas_l7policies WHERE listener_id = :listener_id AND "
"provisioning_status = 'ACTIVE';",
{'listener_id': listener_id}).fetchall()
return l7policies
def get_l7rules_by_l7policy(n_session, l7policy_id, ignore_l7rule_status=False):
if ignore_l7rule_status:
l7rules = n_session.execute(
"SELECT id, type, compare_type, invert, `key`, value, "
"provisioning_status, admin_state_up FROM lbaas_l7rules WHERE "
"l7policy_id = :l7policy_id AND (provisioning_status = 'ACTIVE' "
"OR provisioning_status = 'PENDING_CREATE');",
{'l7policy_id': l7policy_id}).fetchall()
else:
l7rules = n_session.execute(
"SELECT id, type, compare_type, invert, `key`, value, "
"provisioning_status, admin_state_up FROM lbaas_l7rules WHERE "
"l7policy_id = :l7policy_id AND provisioning_status = 'ACTIVE';",
{'l7policy_id': l7policy_id}).fetchall()
return l7rules
def get_pool_entries_by_lb(n_session, lb_id):
pools = n_session.execute(
"SELECT id, name, description, protocol, lb_algorithm, "
"healthmonitor_id, admin_state_up, provisioning_status, "
"operating_status FROM lbaas_pools WHERE loadbalancer_id "
" = :lb_id;",
{'lb_id': lb_id}).fetchall()
return pools
def get_sess_pers_by_pool(n_session, pool_id):
sp = n_session.execute(
"SELECT type, cookie_name FROM lbaas_sessionpersistences "
"WHERE pool_id = :pool_id;", {'pool_id': pool_id}).fetchone()
return sp
def get_members_by_pool(n_session, pool_id):
members = n_session.execute(
"SELECT id, subnet_id, address, protocol_port, weight, "
"admin_state_up, provisioning_status, operating_status, name FROM "
"lbaas_members WHERE pool_id = :pool_id;",
{'pool_id': pool_id}).fetchall()
return members
def get_healthmonitor(n_session, hm_id):
hm = n_session.execute(
"SELECT type, delay, timeout, max_retries, http_method, url_path, "
"expected_codes, admin_state_up, provisioning_status, name, "
"max_retries_down FROM lbaas_healthmonitors WHERE id = :hm_id AND "
"provisioning_status = 'ACTIVE';", {'hm_id': hm_id}).fetchone()
if hm is None:
        raise Exception(_('Health monitor %s has invalid '
                          'provisioning_status.') % hm_id)
return hm
def cascade_delete_neutron_lb(n_session, lb_id):
listeners = n_session.execute(
"SELECT id FROM lbaas_listeners WHERE loadbalancer_id = :lb_id;",
{'lb_id': lb_id})
for listener in listeners:
l7policies = n_session.execute(
"SELECT id FROM lbaas_l7policies WHERE listener_id = :list_id;",
{'list_id': listener[0]})
for l7policy in l7policies:
# Delete l7rules
n_session.execute(
"DELETE FROM lbaas_l7rules WHERE l7policy_id = :l7p_id;",
{'l7p_id': l7policy[0]})
# Delete l7policies
n_session.execute(
"DELETE FROM lbaas_l7policies WHERE listener_id = :list_id;",
{'list_id': listener[0]})
# Delete SNI records
n_session.execute(
"DELETE FROM lbaas_sni WHERE listener_id = :list_id;",
{'list_id': listener[0]})
# Delete the listeners
n_session.execute(
"DELETE FROM lbaas_listeners WHERE loadbalancer_id = :lb_id;",
{'lb_id': lb_id})
pools = n_session.execute(
"SELECT id, healthmonitor_id FROM lbaas_pools "
"WHERE loadbalancer_id = :lb_id;", {'lb_id': lb_id}).fetchall()
for pool in pools:
# Delete the members
n_session.execute(
"DELETE FROM lbaas_members WHERE pool_id = :pool_id;",
{'pool_id': pool[0]})
# Delete the session persistence records
n_session.execute(
"DELETE FROM lbaas_sessionpersistences WHERE pool_id = :pool_id;",
{'pool_id': pool[0]})
# Delete the pools
n_session.execute(
"DELETE FROM lbaas_pools WHERE id = :pool_id;",
{'pool_id': pool[0]})
# Delete the health monitor
if pool[1]:
result = n_session.execute("DELETE FROM lbaas_healthmonitors "
"WHERE id = :id", {'id': pool[1]})
if result.rowcount != 1:
raise Exception(_('Failed to delete health monitor: '
'%s') % pool[1])
# Delete the lb stats
n_session.execute(
"DELETE FROM lbaas_loadbalancer_statistics WHERE "
"loadbalancer_id = :lb_id;", {'lb_id': lb_id})
# Delete provider record
n_session.execute(
"DELETE FROM providerresourceassociations WHERE "
"resource_id = :lb_id;", {'lb_id': lb_id})
    # Delete the load balancer
n_session.execute(
"DELETE FROM lbaas_loadbalancers WHERE id = :lb_id;", {'lb_id': lb_id})
def get_parent_project(k_session, tenant_id):
parent_id = k_session.execute(
"SELECT parent_id FROM project WHERE id = :id;", {'id': tenant_id}).fetchone()
return parent_id
def get_project_entry(k_session, tenant_id):
# Get the project entry from keystone DB
tenant = k_session.execute(
"SELECT id FROM project WHERE id = :id;", {'id': tenant_id}).fetchone()
return tenant
def get_tenant_by_name(k_session, name):
# Get the project entry from keystone DB
tenant = k_session.execute(
"SELECT id FROM project WHERE name = :name;", {'name': name}).fetchall()
return tenant
def get_flavor_id(o_session, conf_flavor_id):
# Get flavor id from octavia DB
flavor_id_list = []
flavor_id = o_session.execute(
"SELECT id FROM octavia.flavor WHERE id = :id;", {'id': conf_flavor_id}).fetchone()
if flavor_id:
flavor_id_list.append(flavor_id[0])
return flavor_id_list
``` |
{
"source": "a10networks/PrometheusExporter",
"score": 2
} |
#### File: a10networks/PrometheusExporter/acos_exporter.py
```python
import json
import yaml
import sys
from threading import Lock
import prometheus_client
import requests
import urllib3
from flask import Response, Flask, request
from prometheus_client import Gauge
import logging
from logging.handlers import RotatingFileHandler
UNDERSCORE = "_"
SLASH = "/"
HYPHEN = "-"
PLUS = "+"
LOG_FILE_SIZE = 5*1024*1024
API_TIMEOUT = 5
global_api_collection = dict()
global_stats = dict()
app = Flask(__name__)
_INF = float("inf")
lock1 = Lock()
tokens = dict()
def get_valid_token(host_ip, to_call=False):
global tokens
lock1.acquire()
try:
if host_ip in tokens and not to_call:
return tokens[host_ip]
else:
token = ""
if host_ip not in tokens or to_call:
token = getauth(host_ip)
if not token:
logger.error("Auth token not received.")
return ""
tokens[host_ip] = token
return tokens[host_ip]
finally:
lock1.release()
def set_logger(log_file, log_level):
log_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARN': logging.WARN,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL,
}
if log_level.upper() not in log_levels:
print(log_level.upper()+" is invalid log level, setting 'INFO' as default.")
log_level = "INFO"
try:
log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s(%(lineno)d) %(message)s')
log_handler = RotatingFileHandler(log_file, maxBytes=LOG_FILE_SIZE, backupCount=2, encoding=None,
delay=True)
log_handler.setFormatter(log_formatter)
log_handler.setLevel(log_levels[log_level.upper()]) # log levels are in order, DEBUG includes logging at each level
except Exception as e:
raise Exception('Error while setting logger config.')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger('a10_prometheus_exporter_logger')
logger.setLevel(log_levels[log_level.upper()])
logger.addHandler(log_handler)
return logger
def getLabelNameFromA10URL(api_list):
if type(api_list) == list:
empty_list = list()
for api in api_list:
labelName = api.replace(SLASH, UNDERSCORE)
labelName = labelName.replace(HYPHEN, UNDERSCORE)
labelName = labelName.replace(PLUS, UNDERSCORE)
empty_list.append(labelName)
return empty_list
else:
labelName = api_list.replace(SLASH, UNDERSCORE)
labelName = labelName.replace(HYPHEN, UNDERSCORE)
labelName = labelName.replace(PLUS, UNDERSCORE)
return labelName
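# Illustrative example (added; the endpoint string below is hypothetical):
#   getLabelNameFromA10URL("/slb/virtual-server/stats") -> "_slb_virtual_server_stats"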
def getauth(host):
with open('config.yml') as f:
hosts_data = yaml.safe_load(f)["hosts"]
if host not in hosts_data:
logger.error("Host credentials not found in creds config")
return ''
else:
uname = hosts_data[host].get('username','')
pwd = hosts_data[host].get('password','')
if not uname:
logger.error("username not provided.")
if not pwd:
logger.error("password not provided.")
    payload = {'Credentials': {'username': uname, 'password': pwd}}
try:
auth = json.loads(requests.post("https://{host}/axapi/v3/auth".format(host=host), json=payload,
verify=False, timeout=API_TIMEOUT).content.decode('UTF-8'))
except requests.exceptions.Timeout:
logger.error("Connection to {host} timed out. (connect timeout={timeout} secs)".format(host=host,
timeout=API_TIMEOUT))
return ''
if 'authresponse' not in auth:
logger.error("Host credentials are not correct")
return ''
return 'A10 ' + auth['authresponse']['signature']
def get(api_endpoints, endpoint, host_ip, headers):
try:
body = {
"batch-get-list": list()
}
for api_endpoint in api_endpoints:
body["batch-get-list"].append({"uri": "/axapi/v3" + api_endpoint })
batch_endpoint = "/batch-get"
logger.info("Uri - " + endpoint + batch_endpoint)
response = json.loads(
requests.post(endpoint+batch_endpoint, data=json.dumps(body), headers=headers, verify=False).content.decode('UTF-8'))
logger.debug("AXAPI batch response - " + str(response))
if 'response' in response and 'err' in response['response']:
msg = response['response']['err']['msg']
if str(msg).lower().__contains__("uri not found"):
logger.error("Request for api failed - batch-get" + ", response - " + msg)
elif str(msg).lower().__contains__("unauthorized"):
token = get_valid_token(host_ip, True)
if token:
logger.info("Re-executing an api -", endpoint+batch_endpoint, " with the new token")
headers = {'content-type': 'application/json', 'Authorization': token}
response = json.loads(
requests.post(endpoint+batch_endpoint, data=json.dumps(body), headers=headers, verify=False).content.decode('UTF-8'))
else:
logger.error("Unknown error message - ", msg)
except Exception as e:
logger.error("Exception caught - ", e)
response = ""
return response
def get_partition(endpoint, headers):
partition_endpoint = "/active-partition"
response = json.loads(requests.get(endpoint + partition_endpoint, headers=headers, verify=False).content.decode('UTF-8'))
return "partition - "+str(response)
def change_partition(partition, endpoint, headers):
partition_endpoint = "/active-partition/"+ str(partition)
logger.info("Uri - " + endpoint + partition_endpoint)
try:
requests.post(endpoint + partition_endpoint, headers=headers, verify=False)
except Exception as e:
logger.exception(e)
logger.info("Partition changed to " + partition)
@app.route("/")
def default():
return "Please provide /metrics?query-params!"
def generate_metrics(resp_data, api_name, partition, host_ip, key, res):
api = str(api_name)
if api.startswith("_"):
api = api[1:]
current_api_stats = dict()
if api in global_api_collection:
current_api_stats = global_api_collection[api]
    # This section maintains a local dictionary mapping stats or rate field names to Gauge objects.
    # It avoids duplicate key_name registrations in the time series database
    # by consulting the global dictionary of key_name -> Gauge objects.
for key in resp_data:
org_key = key
if HYPHEN in key:
key = key.replace(HYPHEN, UNDERSCORE)
if key not in global_stats:
current_api_stats[key] = Gauge(key, "api-" + api + "key-" + key,
labelnames=(["api_name", "partition", "host"]), )
current_api_stats[key].labels(api_name=api, partition=partition, host=host_ip).set(resp_data[org_key])
global_stats[key] = current_api_stats[key]
elif key in global_stats:
global_stats[key].labels(api_name=api, partition=partition, host=host_ip).set(resp_data[org_key])
global_api_collection[api] = current_api_stats
for name in global_api_collection[api]:
res.append(prometheus_client.generate_latest(global_api_collection[api][name]))
return res
def parse_recursion(event, api_name, api_response, partition, host_ip, key,res, recursion = False):
resp_data = dict()
if event == None:
return
if type(event) == dict and "stats" not in event and "rate" not in event:
for item in event:
parse_recursion(event[item], api_name, api_response, partition, host_ip, key,res, recursion = True)
elif type(event) == dict and "stats" in event:
resp_data = event.get("stats", {})
if recursion:
api_name_slash = event.get("a10-url", "")
api_name = api_name_slash.replace("/axapi/v3","")
api_name = getLabelNameFromA10URL(api_name)
res = generate_metrics(resp_data, api_name, partition, host_ip, key,res)
elif type(event) == dict and "rate" in event:
resp_data = event.get("rate", {})
if recursion:
api_name_slash = event.get("a10-url", "")
api_name = api_name_slash.replace("/axapi/v3","")
api_name = getLabelNameFromA10URL(api_name)
res = generate_metrics(resp_data, api_name, partition, host_ip, key,res)
else:
logger.error("Stats not found for API name '{}' with response {}.".format(api_name, api_response))
#return "Stats not found for API name '{}' with response {}.".format(api_name, api_response)
return res
@app.route("/metrics")
def generic_exporter():
logger.debug("---------------------------------------------------------------------------------------------------")
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
host_ip = request.args.get("host_ip","")
api_endpoints = request.args.getlist("api_endpoint")
if not api_endpoints:
with open("apis.txt") as file:
default_endpoint = file.readlines()
default_endpoint = [endpoint.strip() for endpoint in default_endpoint]
api_endpoints = default_endpoint
logger.error("api_endpoint are of default")
api_names = getLabelNameFromA10URL(api_endpoints)
partition = request.args.get("partition", "shared")
res = []
if not host_ip:
logger.error("host_ip is required. Exiting API endpoints - {}".format(api_endpoints))
return "host_ip is required. Exiting API endpoints - {}".format(api_endpoints)
logger.info("Host = " + host_ip + "\t" +
"API = " + str(api_names))
logger.info("Endpoint = " + str(api_endpoints))
token = get_valid_token(host_ip)
if not token:
return "Authentication token not received."
endpoint = "https://{host_ip}/axapi/v3".format(host_ip=host_ip)
headers = {'content-type': 'application/json', 'Authorization': token}
logger.debug(get_partition(endpoint, headers))
if "shared" not in partition:
try:
change_partition(partition, endpoint, headers)
response = get(api_endpoints, endpoint, host_ip, headers)
finally:
change_partition("shared", endpoint, headers)
else:
response = get(api_endpoints, endpoint, host_ip, headers)
api_counter = 0
batch_list = response.get("batch-get-list", [])
for response in batch_list:
api_endpoint = api_endpoints[api_counter]
api_name = api_names[api_counter]
logger.debug("name = " + api_name)
api_response = response.get("resp", {})
logger.debug("API \"{}\" Response - {}".format(api_name, str(api_response)))
api_counter += 1
try:
key = list(api_response.keys())[0]
event = api_response.get(key, {})
res = parse_recursion(event, api_name, api_response, partition, host_ip, key,res)
except Exception as ex:
logger.exception(ex.args[0])
return api_endpoint + " has something missing."
logger.debug("Final Response - " + str(res))
return Response(res, mimetype="text/plain")
def main():
app.run(debug=True, port=9734, host='0.0.0.0')
if __name__ == '__main__':
try:
with open('config.yml') as f:
log_data = yaml.safe_load(f).get("log", {})
logger = set_logger(log_data.get("log_file","exporter.log"), log_data.get("log_level","INFO"))
logger.info("Starting exporter")
main()
except Exception as e:
print(e)
sys.exit()
```
#### File: a10networks/PrometheusExporter/client.py
```python
import sys
import json
import os
import requests
import urllib3
from random import randint
import acos_exporter
UNDERSCORE = "_"
SLASH = "/"
HYPHEN = "-"
PLUS = "+"
job = '''
- job_name: 'name_replace'
static_configs:
- targets: ['localhost:9734']
metrics_path: '/metrics'
params:
host_ip: ["ip_replace"]
api_endpoint: ["api_endpoint_replace"]
api_name: ["api_names_replace"]
'''
yml = '''
global:
scrape_interval: 15s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
'''
def getauth(ip):
with open('config.json') as f:
data = json.load(f)["hosts"]
if ip not in data:
print("Host credentials not found in creds config")
return ''
else:
uname = data[ip]['username']
pwd = data[ip]['password']
    payload = {'Credentials': {'username': uname, 'password': pwd}}
auth = json.loads(
requests.post("https://{host}/axapi/v3/auth".format(host=ip), json=payload, verify=False).content.decode(
'UTF-8'))
return 'A10 ' + auth['authresponse']['signature']
def execute(ip):
if ip:
list = postdatatoapi(ip)
createyml(ip, list)
runexporter()
else:
print("Host not provided, exiting")
sys.exit()
def runexporter():
with open('config.json') as f:
data = json.load(f)["log"]
try:
acos_exporter.logger = acos_exporter.set_logger(data["log_file"], data["log_level"])
except Exception as e:
print("Config file is not correct")
print(e)
sys.exit()
acos_exporter.logger.info("Starting exporter")
acos_exporter.main()
def postdatatoapi(ip):
    # Local names renamed so they no longer shadow the built-in `list` and the imported `json` module.
    api_list = getapilist(ip)
    for api in api_list:
        resp = getformat(ip, api)
        for key in resp:
            for value in resp[key]['stats']:
                resp[key]['stats'][value] = randint(1, 10)
        print(poststats(ip, api, resp))
    return api_list
def createyml(ip, list):
ct = 1
data = yml
for item in list:
name = "prometheus_"
api = item.split("/axapi/v3")[1].split("/stats")[0]
replaced = job.replace("name_replace", name + "job_" + str(ct)).replace("ip_replace", ip).replace(
"api_endpoint_replace", api)
if HYPHEN in api:
api = api.replace(HYPHEN, UNDERSCORE)
if PLUS in api:
api = api.replace(PLUS, UNDERSCORE)
if SLASH in api:
api = api.replace(SLASH, UNDERSCORE)
replaced = replaced.replace("api_names_replace", api)
ct = ct + 1
data = data + replaced
#generating prometheus.yml in current working directory
file1 = open(os.getcwd()+'/prometheus.yml', 'w')
file1.write(data)
file1.close()
def poststats(ip, api, json2):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
token = getauth(ip)
if token == '':
print("Username, password does not match, token can not be empty, exiting")
sys.exit()
endpoint = "https://" + ip + ":443" + api
headers = {'content-type': 'application/json', 'Authorization': token}
return json.loads(
requests.post(endpoint, json=json2, verify=False, headers=headers).content.decode('UTF-8'))
def getformat(ip, api):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
token = getauth(ip)
if token == '':
print("Username, password does not match, token can not be empty, exiting")
sys.exit()
endpoint = "http://" + ip + api
headers = {'content-type': 'application/json', 'Authorization': token}
return json.loads(
requests.get(endpoint, headers=headers, verify=False).content.decode('UTF-8'))
def getapilist(ip):
    with open('apis.txt', 'r') as api_file:
        return [line.strip() for line in api_file.readlines()]
if __name__ == "__main__":
execute(sys.argv[1])
``` |
{
"source": "a10pepo/parrot_ar_drone",
"score": 2
} |
#### File: parrot_ar_drone/code/main_script.py
```python
import libardrone
#import pygame
from time import sleep
import time
import cv2
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from threading import Thread
import os
import math
from shapely.geometry import LineString
from shapely.geometry import Point
from bluepy.btle import DefaultDelegate, Peripheral, Scanner
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
debug=True
drone = libardrone.ARDrone()
ds_test = pd.DataFrame()
ds_oper = pd.DataFrame()
p = Point(0,0)
running = True
route={'p0':Point(0,0),'p1':Point(1,0),'p2':Point(1,1)}
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
"""if isNewDev:
print ("Discovered device", dev.addr)
elif isNewData:
print ("Received new data from", dev.addr)
"""
def handleNotification(self, cHandle, data):
print(data)
def line_intersection(line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return x, y
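# Worked example (added for illustration, not part of the original script):
#   line_intersection([(0, 0), (1, 1)], [(0, 1), (1, 0)]) returns (0.5, 0.5),
#   the crossing point of the two diagonals of the unit square.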
def setble(x,y,radius,clr):
return plt.Circle((x,y),radius,fc="none",edgecolor=clr)
def returnpoint(x0,y0,r0,x1,y1,r1,x2,y2,r2):
result=calculateintersection(x0,y0,r0,x1,y1,r1)
result2=calculateintersection(x0,y0,r0,x2,y2,r2)
result3=calculateintersection(x1,y1,r1,x2,y2,r2)
if debug:
print("Intersection 0/1")
print(result)
print("Intersection 0/2")
print(result2)
print("Intersection 1/2")
print(result3)
time.sleep(5)
point=None
if len(result) > 0:
if len(result2) > 0:
message="case A: Circle 0&1 & 0&2 intersect"
point_int=line_intersection([(result[0],result[1]), (result[2],result[3])], [(result2[0],result2[1]), (result2[2],result2[3])])
point=Point(point_int[0],point_int[1])
elif len(result3) > 0:
message="case B: Circle 0&1 & 1&2 intersect"
point_int=line_intersection([(result[0],result[1]), (result[2],result[3])], [(result3[0],result3[1]), (result3[2],result3[3])])
point=Point(point_int[0],point_int[1])
else:
message="case G: Circle 0&1 intersect"
point=Point((result[0]+result[2])/2,(result[1]+result[3])/2)
elif len(result3) > 0 and len(result2) > 0:
message="case C: Circle 0&1 & 1&2 intersect"
point_int=line_intersection([(result2[0],result2[1]), (result2[2],result2[3])], [(result3[0],result3[1]), (result3[2],result3[3])])
point=Point(point_int[0],point_int[1])
elif len(result3) > 0:
message="case D: Circle 1&2 intersect: Medium point"
point=Point((result3[0]+result3[2])/2,(result3[1]+result3[3])/2)
elif len(result2) > 0:
message="case E: Circle 0&2 intersect: Medium point"
point=Point((result2[0]+result2[2])/2,(result2[1]+result2[3])/2)
else:
message="case F: No intersection ERROR no signal"
point=Point(0,0)
#x = np.linspace(0, 1, 100000)
"""fig, ax = plt.subplots(figsize=(12, 10))
plt.grid(linestyle='--')
ax.set_aspect(1)
ax.add_artist(setble(x0,y0,r0,"r")) # Beacon1
ax.add_artist(setble(x1,y1,r1,"g")) # Beacon2
ax.add_artist(setble(x2,y2,r2,"b")) # Beacon3
if len(result) >0:
ax.add_artist(setble(result[0],result[1],0.01,"b")) # Samsung
ax.add_artist(setble(result[2],result[3],0.01,"b")) # Samsung
if len(result2) >0:
ax.add_artist(setble(result2[0],result2[1],0.01,"b")) # Samsung
ax.add_artist(setble(result2[2],result2[3],0.01,"b")) # Samsung
if len(result3) >0:
ax.add_artist(setble(result3[0],result3[1],0.01,"b")) # Samsung
ax.add_artist(setble(result3[2],result3[3],0.01,"b")) # Samsung
ax.add_artist(setble(point.x,point.y,0.01,"r"))
"""
if debug:
print(message)
return point
def calculateintersection(x0,y0,r0,x1,y1,r1):
EPSILON = 0.000001;
dx = x1-x0
dy = y1-y0
d=math.sqrt((dy*dy)+(dx*dx))
if d>r0+r1:
return []
if d < abs(r0-r1):
return []
a = ((r0*r0) - (r1*r1) + (d*d)) / (2.0 * d)
point2_x = x0 + (dx * a/d)
point2_y = y0 + (dy * a/d)
h = math.sqrt((r0*r0) - (a*a))
rx = -dy * (h/d)
ry = dx * (h/d)
intersectionPoint1_x = point2_x + rx
intersectionPoint2_x = point2_x - rx
intersectionPoint1_y = point2_y + ry
intersectionPoint2_y = point2_y - ry
return [intersectionPoint1_x,intersectionPoint1_y,intersectionPoint2_x,intersectionPoint2_y]
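# Worked example (added for illustration): two unit circles centred at (0, 0) and
# (1, 0) intersect at (0.5, +/-sqrt(3)/2), so
#   calculateintersection(0, 0, 1, 1, 0, 1) ~= [0.5, 0.866, 0.5, -0.866]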
def inverse(x):
return x*(-1)
def get_current_position():
MIN=30
MAX=100
SCALE=1.3
scanner = Scanner().withDelegate(ScanDelegate())
devices = scanner.scan(1.9)
devlist=['30:ae:a4:9c:e7:c2','30:ae:a4:97:6c:26','30:ae:a4:9c:8f:a2']
global ds_test
for dev in devices:
if dev.addr in devlist:
# print("ADDR: %s" % (dev.addr))
data = [[time.time(),dev.addr,-1*dev.rssi,dev.iface,dev.addrType,dev.getValueText(1),dev.getValueText(10),dev.getValueText(255)]]
#data = [[time.time()]]
ds_test=ds_test.append(data)
#print("bucle: %d" % len(ds_test))
if len(ds_test) == 0:
return None
# else:
# print("bucle: %d" % (len(ds_test[ds_test[2]>=MIN])))
# print("bucle2: %d" % len(ds_test))
# return None
ds_test=ds_test[ds_test[2]>=MIN]
if debug:
print("bucle2: %d" % len(ds_test))
print(len(ds_test[ds_test[0]-(time.time())<2000]))
#ds_test["rssi_norm"]=MAX-ds_test[2]
ds_test["rssi_norm"]=(ds_test[2]-MIN)/(MAX-MIN)
#ds_test["rssi_norm"]=1-ds_test["rssi_norm"]
ds_test["rssi_norm"]=ds_test["rssi_norm"]*SCALE
ds_test_b3=ds_test[ds_test[1]=="30:ae:a4:97:6c:26"] # 3
ds_test_b1=ds_test[ds_test[1]=="30:ae:a4:9c:e7:c2"] # 1
ds_test_b2=ds_test[ds_test[1]=="30:ae:a4:9c:8f:a2"] # 2
ds_b1 = 0
ds_b2 = 0
ds_b3 = 0
if len(ds_test_b1) > 0 and time.time()-ds_test_b1.iloc[-1][0] < 10:
ds_b1 = ds_test_b1.iloc[-1]["rssi_norm"]
if len(ds_test_b2) > 0 and time.time()-ds_test_b2.iloc[-1][0] < 10:
ds_b2 = ds_test_b2.iloc[-1]["rssi_norm"]
if len(ds_test_b3) > 0 and time.time()-ds_test_b3.iloc[-1][0] < 10 :
ds_b3 = ds_test_b3.iloc[-1]["rssi_norm"]
print("Beacon 1: %s" % (ds_b1))
print("Beacon 2: %s" % (ds_b2))
print("Beacon 3: %s" % (ds_b3))
if debug:
print("Summary:")
print("#########")
print("len ds_test %d" % (len(ds_test)))
print(ds_b1)
print(ds_b2)
print(ds_b3)
print("#########")
print("Position 1: %s" % (ds_b1))
print("Position 2: %s" % (ds_b2))
print("Position 3: %s" % (ds_b3))
point=returnpoint(0,0,ds_b1,0,1,ds_b2,1,0.5,ds_b3)
if point == None:
point = Point(0,0)
data = [[time.time()*1000,str(point.x),str(point.y),0,0,ds_b1,0,1,ds_b2,1,0.5,ds_b3]]
temp = pd.DataFrame(data)
temp.to_csv('/home/pepo/Documents/nissan_code/Loc_csv.csv',mode='a', header=False)
return point
def get_info():
print('Battery %i%%' % drone.navdata.get(0,dict()).get('battery',0))
print('State %i' % drone.navdata.get(0,dict()).get('ctrl_state',0))
print('Theta %i' % drone.navdata.get(0,dict()).get('theta',0))
print('Phi %i' % drone.navdata.get(0,dict()).get('phi',0))
print('PSI %i' % drone.navdata.get(0,dict()).get('psi',0))
print('Altitude %i' % drone.navdata.get(0,dict()).get('altitude',0))
print('vx %i' % drone.navdata.get(0,dict()).get('vx',0))
print('vy %i' % drone.navdata.get(0,dict()).get('vy',0))
print('vz %i' % drone.navdata.get(0,dict()).get('vz',0))
def get_detail(name):
return drone.navdata.get(0,dict()).get(name,0)
def takeoff(height):
drone.takeoff()
def move_left(secs):
drone.move_left()
sleep(secs)
def move_right(secs):
drone.move_right()
sleep(secs)
def turn_right(secs):
drone.turn_right()
sleep(secs)
def turn_left(secs):
drone.turn_left()
sleep(secs)
def move_ff(secs):
drone.move_forward()
sleep(secs)
def move_back(secs):
drone.move_backward()
sleep(secs)
def move_up(secs):
drone.move_up()
sleep(secs)
def move_down(secs):
drone.move_down()
sleep(secs)
def threadlocation(threadname):
global p
global running
while running:
p = get_current_position()
if debug:
print(p)
if p == None:
p = Point(0,0)
os._exit(0)
def main():
global running
cam = cv2.VideoCapture('tcp://192.168.1.1:5555')
#drone = libardrone.ARDrone()
drone.takeoff()
while running:
# get current frame of video
running, frame = cam.read()
str_image = ("Location: X(%s) , Y(%s) \n Battery: %s \n Height: %s" % (str(round(p.x,2)),str(round(p.y,2)),str(get_detail('battery')),str(get_detail('altitude'))))
font=cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,str_image,(0,30),font,0.5,(0,255,0),1,cv2.LINE_AA,bottomLeftOrigin=False)
#print(get_current_position())
if running:
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == 27:
# escape key pressed
running = False
print("Exit requested")
else:
# error reading frame
print ('error reading video feed')
drone.land()
cam.release()
cv2.destroyAllWindows()
os._exit(0)
def inittrack():
THRESHOLD=0.002
global p
global ds_oper
items=len(route)
i=0
while i<items:
name = 'p'+str(i)
target = route[name]
print("Situation")
print("Target X: %s Y: %s" % (str(target.x),str(target.y)))
print("Position X: %s Y: %s" % (str(p.x),str(p.y)))
print("Distance:")
print("X: %s Y: %s" % (str(p.x-target.x),str(p.y-target.y)))
a=p.x-target.x
b=p.y-target.y
op=""
if abs(a)<THRESHOLD and abs(b)<THRESHOLD:
i=i+1
print("point found")
if a>0:
op=op+"B"
print("move backwards")
else:
op=op+"F"
print("move forwards")
if b<0:
op=op+"R"
print("move right")
else:
op=op+"L"
print("move left")
data = [[time.time()*1000,str(target.x),str(target.y),str(p.x),str(p.y),op]]
temp = pd.DataFrame(data)
# temp.to_csv('/home/pepo/Documents/nissan_code/Loc_csv.csv',mode='a', header=False)
time.sleep(2)
if __name__ == '__main__':
try:
drone.trim()
drone.speed = 0.2
if False:
thread_loc = Thread(target=threadlocation, args=['t1'])
thread_loc.start()
if False:
thread_main = Thread(target=main, args=[])
thread_main.start()
# inittrack()
print("take off")
drone.takeoff()
sleep(5)
print("move up")
drone.move_up()
sleep(5)
print("move left")
drone.move_left()
sleep(3)
print("hover")
drone.hover()
sleep(1)
print("move down")
drone.move_down()
sleep(5)
print("move right")
drone.move_right()
sleep(3)
print("hover")
drone.hover()
sleep(1)
# sleep(1)
print("land")
drone.land()
except (SystemExit,KeyboardInterrupt):
drone.land()
drone.halt()
print("Dron Aborted")
except:
drone.land()
drone.halt()
print("Dron Exception Aborted")
drone.halt()
```
#### File: code/organized/drone.py
```python
import location
import applogger as log
import time
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing
from olympe.messages.ardrone3.PilotingState import moveToChanged, FlyingStateChanged, PositionChanged, AttitudeChanged
def perform(operation,distance):
for action in operation:
if action==location.UP:
log.log(log.INFO,"Action sent: UP")
if action==location.DOWN:
log.log(log.INFO,"Action sent: DOWN")
if action==location.LEFT:
log.log(log.INFO,"Action sent: LEFT")
drone(moveBy(0, 1*distance.x, 0, 0) >> FlyingStateChanged(state="hovering", _timeout=5)).wait()
if action==location.RIGHT:
log.log(log.INFO,"Action sent: RIGHT")
drone(moveBy(0, -1*distance.x, 0, 0) >> FlyingStateChanged(state="hovering", _timeout=5)).wait()
if action==location.FORWARD:
log.log(log.INFO,"Action sent: FWD")
drone(moveBy(1*distance.y, 0, 0, 0) >> FlyingStateChanged(state="hovering", _timeout=5)).wait()
if action==location.BACKWARD:
log.log(log.INFO,"Action sent: BACKWARD")
drone(moveBy(-1*distance.y, 0, 0, 0) >> FlyingStateChanged(state="hovering", _timeout=5)).wait()
return
def evalpicture(image):
t1 = time.time()
log.log(log.INFO,"Image Sent for Evaluation")
# Invoke models
t2 = time.time()
log.log(log.INFO,"Image Evaluated")
log.timer("Image Evaluated",t2-t1)
return
def scan():
return
def init():
global drone
drone = olympe.Drone("10.202.0.1")
drone.connection()
drone(TakeOff() >> FlyingStateChanged(state="hovering", _timeout=5)).wait()
return
def end():
land()
drone.disconnection()
def land():
drone(Landing()).wait()
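# Hedged usage sketch (added for illustration; the `location` constants and the
# Point-like `distance` argument are assumptions inferred from perform() above):
#   init()
#   perform([location.LEFT, location.FORWARD], distance)
#   end()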
``` |
{
"source": "a1136395507/Blog",
"score": 3
} |
#### File: Blog/app/__init__.py
```python
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from app.utils.json_formater import JSONFormatter
from config.config import config_map
db = SQLAlchemy()
# 初始化登录操作
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "Auth.login"
def configure_logging(app):
"""Configure file(info) and email(error) logging."""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask.logging import default_handler
app.logger.setLevel(logging.INFO)
if not os.path.isdir(app.config['LOG_FOLDER']):
os.makedirs(app.config['LOG_FOLDER'])
format_str = "%(asctime)s %(levelname)s %(process)d %(thread)d %(filename)s-%(funcName)s:%(lineno)d %(message)s"
format = logging.Formatter(format_str)
info_log = os.path.join(app.config['LOG_FOLDER'], 'info.log')
info_file_handler = RotatingFileHandler(info_log, maxBytes=100000, backupCount=10)
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(format)
app.logger.addHandler(info_file_handler)
error_log = os.path.join(app.config['LOG_FOLDER'], 'error.log')
error_file_handler = RotatingFileHandler(error_log, maxBytes=100000, backupCount=10)
error_file_handler.setLevel(logging.ERROR)
app.logger.addHandler(error_file_handler)
app.logger.removeHandler(default_handler)
def create_app(config_name='dev'):
"""
初始化flask app ,config_name 为对于的环境名 DEV|PRD
config_name : 环境名
:return:
"""
app = Flask(__name__)
app.config.from_object(config_map[config_name])
app.config['JSON_AS_ASCII'] = False
# config[config_name].init_app(app)
if not db.app:
app.logger.debug("Create db.app:{} pid:{}".format(app, os.getpid()))
db.app = app
db.init_app(app)
login_manager.init_app(app)
configure_logging(app)
return app
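# Minimal usage sketch (added; assumes the 'dev' key defined in config/config.py below):
#   app = create_app('dev')
#   app.run(host=app.config['HOST'], port=app.config['PORT'])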
```
#### File: Blog/config/config.py
```python
import os
from config import DB_USER_INFO_MYSQL,DB_BLOG_PRODUCT_MYSQL,DB_URL
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""
基础配置
"""
DEBUG = True
SECRET_KEY = os.environ.get('SECRET_KEY') or 'this is a complicated string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_POOL_SIZE = 200
SQLALCHEMY_POOL_RECYCLE = 3600
SQLALCHEMY_ENGINE_OPTIONS = {
'pool_recycle': 280,
'pool_timeout': 300,
'pool_size': 50,
'max_overflow': 5,
}
SQL_USER_POOL = DB_USER_INFO_MYSQL.PYMYSQL_POOL
SQL_PRODUCT_POOL = DB_BLOG_PRODUCT_MYSQL.PYMYSQL_POOL
LOG_FOLDER = os.path.join(basedir, 'logs')
HOST = "0.0.0.0"
PORT = 5000
SQLALCHEMY_DATABASE_URI = DB_URL
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""测试环境"""
DEBUG = True
ENV = "dev"
class ProductionConfig(Config):
""" 生产环境"""
DEBUG = False
ENV = "prd"
config_map = {
'dev': DevelopmentConfig,
'prd': ProductionConfig,
}
``` |
{
"source": "a113n/bcbio-nextgen",
"score": 2
} |
#### File: bcbio/bam/__init__.py
```python
from __future__ import print_function
import collections
import os
import signal
import subprocess
import numpy
import pybedtools
import pysam
import toolz as tz
from six.moves import zip_longest
from bcbio import broad, utils
from bcbio.bam import ref
from bcbio.distributed import objectstore
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
def is_empty(bam_file):
"""Determine if a BAM file is empty
"""
bam_file = objectstore.cl_input(bam_file)
cmd = ("set -o pipefail; "
"samtools view {bam_file} | head -1 | wc -l")
p = subprocess.Popen(cmd.format(**locals()), shell=True,
executable=do.find_bash(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
stdout, stderr = p.communicate()
stdout = stdout.decode()
stderr = stderr.decode()
if ((p.returncode == 0 or p.returncode == 141) and
(stderr == "" or (stderr.startswith("gof3r") and stderr.endswith("broken pipe")))):
return int(stdout) == 0
else:
raise ValueError("Failed to check empty status of BAM file: %s" % str(stderr))
def is_paired(bam_file):
"""Determine if a BAM file has paired reads.
Works around issues with head closing the samtools pipe using signal trick from:
http://stackoverflow.com/a/12451083/252589
"""
bam_file = objectstore.cl_input(bam_file)
cmd = ("set -o pipefail; "
"samtools view -h {bam_file} | head -300000 | "
"samtools view -S -f 1 /dev/stdin | head -1 | wc -l")
p = subprocess.Popen(cmd.format(**locals()), shell=True,
executable=do.find_bash(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))
stdout, stderr = p.communicate()
stdout = stdout.decode()
stderr = stderr.decode()
stderr = stderr.strip()
if ((p.returncode == 0 or p.returncode == 141) and
(stderr == "" or (stderr.startswith("gof3r") and stderr.endswith("broken pipe")))):
return int(stdout) > 0
else:
raise ValueError("Failed to check paired status of BAM file: %s" % str(stderr))
def fake_index(in_bam, data):
"""Create a fake index file for namesorted BAMs. bais require by CWL for consistency.
"""
index_file = "%s.bai" % in_bam
if not utils.file_exists(index_file):
with file_transaction(data, index_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("name sorted -- no index")
return index_file
def index(in_bam, config, check_timestamp=True):
"""Index a BAM file, skipping if index present.
Centralizes BAM indexing providing ability to switch indexing approaches.
"""
assert is_bam(in_bam), "%s in not a BAM file" % in_bam
index_file = "%s.bai" % in_bam
alt_index_file = "%s.bai" % os.path.splitext(in_bam)[0]
if check_timestamp:
bai_exists = utils.file_uptodate(index_file, in_bam) or utils.file_uptodate(alt_index_file, in_bam)
else:
bai_exists = utils.file_exists(index_file) or utils.file_exists(alt_index_file)
if not bai_exists:
# Remove old index files and re-run to prevent linking into tx directory
for fname in [index_file, alt_index_file]:
utils.remove_safe(fname)
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, index_file) as tx_index_file:
cmd = "{samtools} index -@ {num_cores} {in_bam} {tx_index_file}"
do.run(cmd.format(**locals()), "Index BAM file: %s" % os.path.basename(in_bam))
return index_file if utils.file_exists(index_file) else alt_index_file
def remove(in_bam):
"""
    Remove a BAM file and its index, if they exist.
"""
if utils.file_exists(in_bam):
utils.remove_safe(in_bam)
if utils.file_exists(in_bam + ".bai"):
utils.remove_safe(in_bam + ".bai")
def idxstats(in_bam, data):
"""Return BAM index stats for the given file, using samtools idxstats.
"""
index(in_bam, data["config"], check_timestamp=False)
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
samtools = config_utils.get_program("samtools", data["config"])
idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode()
out = []
for line in idxstats_out.split("\n"):
if line.strip():
contig, length, aligned, unaligned = line.split("\t")
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
return out
def fai_from_bam(ref_file, bam_file, out_file, data):
"""Create a fai index with only contigs in the input BAM file.
"""
contigs = set([x.contig for x in idxstats(bam_file, data)])
if not utils.file_uptodate(out_file, bam_file):
with open(ref.fasta_idx(ref_file, data["config"])) as in_handle:
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in (l for l in in_handle if l.strip()):
if line.split()[0] in contigs:
out_handle.write(line)
return out_file
def ref_file_from_bam(bam_file, data):
"""Subset a fasta input file to only a fraction of input contigs.
"""
new_ref = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "inputs", "ref")),
"%s-subset.fa" % dd.get_genome_build(data))
if not utils.file_exists(new_ref):
with file_transaction(data, new_ref) as tx_out_file:
contig_file = "%s-contigs.txt" % utils.splitext_plus(new_ref)[0]
with open(contig_file, "w") as out_handle:
for contig in [x.contig for x in idxstats(bam_file, data) if x.contig != "*"]:
out_handle.write("%s\n" % contig)
cmd = "seqtk subseq -l 100 %s %s > %s" % (dd.get_ref_file(data), contig_file, tx_out_file)
do.run(cmd, "Subset %s to BAM file contigs" % dd.get_genome_build(data))
ref.fasta_idx(new_ref, data["config"])
runner = broad.runner_from_path("picard", data["config"])
runner.run_fn("picard_index_ref", new_ref)
return {"base": new_ref}
def get_downsample_pct(in_bam, target_counts, data):
"""Retrieve percentage of file to downsample to get to target counts.
    Avoids minimal downsampling (keeping 90% or more of reads), which is
    not especially useful for improving QC times.
"""
total = sum(x.aligned for x in idxstats(in_bam, data))
with pysam.Samfile(in_bam, "rb") as work_bam:
n_rgs = max(1, len(work_bam.header.get("RG", [])))
rg_target = n_rgs * target_counts
if total > rg_target:
pct = float(rg_target) / float(total)
if pct < 0.9:
return pct
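# Illustrative numbers (added): with a single read group, 100M aligned reads and
# target_counts of 10M, get_downsample_pct returns 0.1; it returns None whenever the
# computed fraction would be 0.9 or higher, since such a mild downsample is not worthwhile.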
def get_aligned_reads(in_bam, data):
index(in_bam, data["config"], check_timestamp=False)
bam_stats = idxstats(in_bam, data)
align = sum(x.aligned for x in bam_stats)
unaligned = sum(x.unaligned for x in bam_stats)
total = float(align + unaligned)
return 1.0 * align / total
def downsample(in_bam, data, target_counts, work_dir=None):
"""Downsample a BAM file to the specified number of target counts.
"""
index(in_bam, data["config"], check_timestamp=False)
ds_pct = get_downsample_pct(in_bam, target_counts, data)
if ds_pct:
out_file = "%s-downsample%s" % os.path.splitext(in_bam)
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
samtools = config_utils.get_program("samtools", data["config"])
num_cores = dd.get_num_cores(data)
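                # samtools view -s expects SEED.FRACTION: the "42." prefix fixes the
                # random seed at 42 and the appended digits of ds_pct give the
                # fraction of reads to keep (comment added for clarity)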
ds_pct = "42." + "{ds_pct:.3}".format(ds_pct=ds_pct).replace("0.", "")
cmd = ("{samtools} view -O BAM -@ {num_cores} -o {tx_out_file} "
"-s {ds_pct} {in_bam}")
do.run(cmd.format(**locals()), "Downsample BAM file: %s" % os.path.basename(in_bam))
return out_file
def get_maxcov_downsample_cl(data, in_pipe=None):
"""Retrieve command line for max coverage downsampling, fitting into bamsormadup output.
"""
max_cov = _get_maxcov_downsample(data) if dd.get_aligner(data) not in ["snap"] else None
if max_cov:
if in_pipe == "bamsormadup":
prefix = "level=0"
elif in_pipe == "samtools":
prefix = "-l 0"
else:
prefix = ""
        # Hold off on multiple cores until after further testing
#core_arg = "-t %s" % dd.get_num_cores(data)
core_arg = ""
return ("%s | variant - -b %s --mark-as-qc-fail --max-coverage %s"
% (prefix, core_arg, max_cov))
else:
if in_pipe == "bamsormadup":
prefix = "indexfilename={tx_out_file}.bai"
else:
prefix = ""
return prefix
def _get_maxcov_downsample(data):
"""Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling.
"""
from bcbio.bam import ref
from bcbio.ngsalign import alignprep, bwa
from bcbio.variation import coverage
fastq_file = data["files"][0]
params = alignprep.get_downsample_params(data)
if params:
num_reads = alignprep.total_reads_from_grabix(fastq_file)
if num_reads:
vrs = dd.get_variant_regions_merged(data)
total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
if vrs:
callable_size = pybedtools.BedTool(vrs).total_coverage()
genome_cov_pct = callable_size / float(total_size)
else:
callable_size = total_size
genome_cov_pct = 1.0
if (genome_cov_pct > coverage.GENOME_COV_THRESH
and dd.get_coverage_interval(data) in ["genome", None, False]):
total_counts, total_sizes = 0, 0
for count, size in bwa.fastq_size_output(fastq_file, 5000):
total_counts += int(count)
total_sizes += (int(size) * int(count))
read_size = float(total_sizes) / float(total_counts)
avg_cov = float(num_reads * read_size) / callable_size
if avg_cov >= params["min_coverage_for_downsampling"]:
return int(avg_cov * params["maxcov_downsample_multiplier"])
return None
def check_header(in_bam, rgnames, ref_file, config):
"""Ensure passed in BAM header matches reference file and read groups names.
"""
_check_bam_contigs(in_bam, ref_file, config)
_check_sample(in_bam, rgnames)
def _check_sample(in_bam, rgnames):
"""Ensure input sample name matches expected run group names.
"""
with pysam.Samfile(in_bam, "rb") as bamfile:
rg = bamfile.header.get("RG", [{}])
msgs = []
warnings = []
if len(rg) > 1:
warnings.append("Multiple read groups found in input BAM. Expect single RG per BAM.")
if len(rg) == 0:
msgs.append("No read groups found in input BAM. Expect single RG per BAM.")
if len(rg) > 0 and any(x.get("SM") != rgnames["sample"] for x in rg):
msgs.append("Read group sample name (SM) does not match configuration `description`: %s vs %s"
% (rg[0].get("SM"), rgnames["sample"]))
if len(msgs) > 0:
raise ValueError("Problems with pre-aligned input BAM file: %s\n" % (in_bam)
+ "\n".join(msgs) +
"\nSetting `bam_clean: fixrg`\n"
"in the configuration can often fix this issue.")
if warnings:
print("*** Potential problems in input BAM compared to reference:\n%s\n" %
"\n".join(warnings))
def _check_bam_contigs(in_bam, ref_file, config):
"""Ensure a pre-aligned BAM file matches the expected reference genome.
"""
# GATK allows chromosome M to be in multiple locations, skip checking it
allowed_outoforder = ["chrM", "MT"]
ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)]
with pysam.Samfile(in_bam, "rb") as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
extra_bcs = [x for x in bam_contigs if x not in ref_contigs]
extra_rcs = [x for x in ref_contigs if x not in bam_contigs]
problems = []
warnings = []
for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and
x not in allowed_outoforder)],
[x for x in ref_contigs if (x not in extra_rcs and
x not in allowed_outoforder)]):
if bc != rc:
if bc and rc:
problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc))
elif bc:
warnings.append("Extra BAM chromosomes: %s" % bc)
elif rc:
warnings.append("Extra reference chromosomes: %s" % rc)
for bc in extra_bcs:
warnings.append("Extra BAM chromosomes: %s" % bc)
for rc in extra_rcs:
warnings.append("Extra reference chromosomes: %s" % rc)
if problems:
raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n"
"Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue."
% "\n".join(problems))
if warnings:
print("*** Potential problems in input BAM compared to reference:\n%s\n" %
"\n".join(warnings))
def open_samfile(in_file):
if is_bam(in_file):
return pysam.Samfile(in_file, "rb")
elif is_sam(in_file):
return pysam.Samfile(in_file, "r")
else:
raise IOError("in_file must be either a BAM file or SAM file. Is the "
"extension .sam or .bam?")
def is_bam(in_file):
_, ext = os.path.splitext(in_file)
if ext == ".bam":
return True
else:
return False
def is_sam(in_file):
_, ext = os.path.splitext(in_file)
if ext == ".sam":
return True
else:
return False
def sam_to_bam(in_sam, config):
if is_bam(in_sam):
return in_sam
assert is_sam(in_sam), "%s is not a SAM file" % in_sam
out_file = os.path.splitext(in_sam)[0] + ".bam"
if utils.file_exists(out_file):
return out_file
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_file) as tx_out_file:
cmd = "{samtools} view -@ {num_cores} -h -S -b {in_sam} -o {tx_out_file}"
do.run(cmd.format(**locals()),
("Convert SAM to BAM (%s cores): %s to %s"
% (str(num_cores), in_sam, out_file)))
return out_file
def bam_to_sam(in_file, config):
if is_sam(in_file):
return in_file
assert is_bam(in_file), "%s is not a BAM file" % in_file
out_file = os.path.splitext(in_file)[0] + ".sam"
if utils.file_exists(out_file):
return out_file
samtools = config_utils.get_program("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_file) as tx_out_file:
cmd = "{samtools} view -@ {num_cores} -h {in_file} -o {tx_out_file}"
do.run(cmd.format(**locals()),
("Convert BAM to SAM (%s cores): %s to %s"
% (str(num_cores), in_file, out_file)))
return out_file
def reheader(header, bam_file, config):
samtools = config_utils.get_program("samtools", config)
base, ext = os.path.splitext(bam_file)
out_file = base + ".reheadered" + ext
cmd = "{samtools} reheader {header} {bam_file} > {out_file}"
do.run(cmd.format(**locals()), "Reheadering %s." % bam_file)
return out_file
def merge(bamfiles, out_bam, config):
    assert all(map(is_bam, bamfiles)), ("Not all of the files to merge are BAM "
                                        "files: %s " % (bamfiles))
assert all(map(utils.file_exists, bamfiles)), ("Not all of the files to merge "
"exist: %s" % (bamfiles))
if len(bamfiles) == 1:
return bamfiles[0]
if os.path.exists(out_bam):
return out_bam
samtools = config_utils.get_program("samtools", config)
bamtools = config_utils.get_program("bamtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, out_bam) as tx_out_bam:
cmd = "{samtools} merge -@ {num_cores} {tx_out_bam} " + " ".join(bamfiles)
do.run(cmd.format(**locals()), "Merge %s into %s." % (bamfiles, out_bam))
index(out_bam, config)
return out_bam
def sort(in_bam, config, order="coordinate", out_dir=None, force=False):
"""Sort a BAM file, skipping if already present.
"""
assert is_bam(in_bam), "%s in not a BAM file" % in_bam
if not force and bam_already_sorted(in_bam, config, order):
return in_bam
sort_stem = _get_sort_stem(in_bam, order, out_dir)
sort_file = sort_stem + ".bam"
if not utils.file_exists(sort_file):
samtools = config_utils.get_program("samtools", config)
cores = config["algorithm"].get("num_cores", 1)
with file_transaction(config, sort_file) as tx_sort_file:
tx_sort_stem = os.path.splitext(tx_sort_file)[0]
tx_dir = utils.safe_makedir(os.path.dirname(tx_sort_file))
order_flag = "-n" if order == "queryname" else ""
resources = config_utils.get_resources("samtools", config)
# Slightly decrease memory and allow more accurate representation
# in Mb to ensure fits within systems like SLURM
mem = config_utils.adjust_memory(resources.get("memory", "2G"),
1.25, "decrease", out_modifier="M").upper()
cmd = ("{samtools} sort -@ {cores} -m {mem} -O BAM {order_flag} "
"-T {tx_sort_stem}-sort -o {tx_sort_file} {in_bam}")
do.run(cmd.format(**locals()), "Sort BAM file %s: %s to %s" %
(order, os.path.basename(in_bam), os.path.basename(sort_file)))
return sort_file
def bam_already_sorted(in_bam, config, order):
return order == _get_sort_order(in_bam, config)
def _get_sort_order(in_bam, config):
for line in pysam.view("-H", in_bam).split("\r\n"):
if line.startswith("@HD"):
for keyval in line.split()[1:]:
key, val = keyval.split(":")
if key == "SO":
return val
def _get_sort_stem(in_bam, order, out_dir):
SUFFIXES = {"coordinate": ".sorted", "queryname": ".nsorted"}
sort_base = os.path.splitext(in_bam)[0]
if out_dir:
sort_base = os.path.join(out_dir, os.path.basename(sort_base))
for suffix in SUFFIXES:
sort_base = sort_base.split(suffix)[0]
return sort_base + SUFFIXES[order]
def aligner_from_header(in_bam):
"""Identify aligner from the BAM header; handling pre-aligned inputs.
"""
from bcbio.pipeline.alignment import TOOLS
with pysam.Samfile(in_bam, "rb") as bamfile:
for pg in bamfile.header.get("PG", []):
for ka in TOOLS.keys():
if pg.get("PN", "").lower().find(ka) >= 0:
return ka
def sample_name(in_bam):
"""Get sample name from BAM file.
"""
with pysam.AlignmentFile(in_bam, "rb", check_sq=False) as in_pysam:
try:
if "RG" in in_pysam.header:
return in_pysam.header["RG"][0]["SM"]
except ValueError:
return None
def estimate_read_length(bam_file, nreads=1000):
"""
estimate median read length of a SAM/BAM file
"""
with open_samfile(bam_file) as bam_handle:
reads = tz.itertoolz.take(nreads, bam_handle)
lengths = [len(x.seq) for x in reads]
return int(numpy.median(lengths))
def estimate_fragment_size(bam_file, nreads=5000):
"""
estimate median fragment size of a SAM/BAM file
"""
with open_samfile(bam_file) as bam_handle:
reads = tz.itertoolz.take(nreads, bam_handle)
# it would be good to skip spliced paired reads.
lengths = [x.template_length for x in reads if x.template_length > 0]
if not lengths:
return 0
return int(numpy.median(lengths))
def filter_primary(bam_file, data):
"""Filter reads to primary only BAM.
Removes:
- not primary alignment (0x100) 256
- supplementary alignment (0x800) 2048
"""
stem, ext = os.path.splitext(bam_file)
out_file = stem + ".primary" + ext
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cores = dd.get_num_cores(data)
cmd = ("samtools view -@ {cores} -F 2304 -b {bam_file} > {tx_out_file}")
do.run(cmd.format(**locals()), ("Filtering primary alignments in %s." %
os.path.basename(bam_file)))
return out_file
def estimate_max_mapq(in_bam, nreads=1e6):
"""Guess maximum MAPQ in a BAM file of reads with alignments
"""
with pysam.Samfile(in_bam, "rb") as work_bam:
reads = tz.take(int(nreads), work_bam)
return max([x.mapq for x in reads if not x.is_unmapped])
def convert_cufflinks_mapq(in_bam, out_bam=None):
"""Cufflinks expects the not-valid 255 MAPQ for uniquely mapped reads.
This detects the maximum mapping quality in a BAM file and sets
reads with that quality to be 255
"""
CUFFLINKSMAPQ = 255
if not out_bam:
out_bam = os.path.splitext(in_bam)[0] + "-cufflinks.bam"
if utils.file_exists(out_bam):
return out_bam
maxmapq = estimate_max_mapq(in_bam)
if maxmapq == CUFFLINKSMAPQ:
return in_bam
logger.info("Converting MAPQ scores in %s to be Cufflinks compatible." % in_bam)
with pysam.Samfile(in_bam, "rb") as in_bam_fh:
with pysam.Samfile(out_bam, "wb", template=in_bam_fh) as out_bam_fh:
for read in in_bam_fh:
if read.mapq == maxmapq and not read.is_unmapped:
read.mapq = CUFFLINKSMAPQ
out_bam_fh.write(read)
return out_bam
def convert_invalid_mapq(in_bam, out_bam=None):
"""Some aligners output 255 to denote a uniquely mapped read which is
an invalid MAPQ value according to the SAM spec. This detects
that and changes it to be 60.
"""
INVALIDMAPQ = 255
VALIDMAPQ = 60
if not out_bam:
out_bam = os.path.splitext(in_bam)[0] + "-MAPQfixed.bam"
if utils.file_exists(out_bam):
return out_bam
maxmapq = estimate_max_mapq(in_bam)
if maxmapq != INVALIDMAPQ:
return in_bam
logger.info("Converting 255 MAPQ scores in %s to 60." % in_bam)
with pysam.Samfile(in_bam, "rb") as in_bam_fh:
with pysam.Samfile(out_bam, "wb", template=in_bam_fh) as out_bam_fh:
for read in in_bam_fh:
if read.mapq == INVALIDMAPQ and not read.is_unmapped:
read.mapq = VALIDMAPQ
out_bam_fh.write(read)
return out_bam
def remove_duplicates(in_bam, data):
"""
remove duplicates from a duplicate marked BAM file
"""
base, ext = os.path.splitext(in_bam)
out_bam = base + "-noduplicates" + ext
if utils.file_exists(out_bam):
return out_bam
num_cores = dd.get_num_cores(data)
sambamba = config_utils.get_program("sambamba", data)
with file_transaction(out_bam) as tx_out_bam:
cmd = (f'{sambamba} view -h --nthreads {num_cores} -f bam -F "not duplicate" '
f'{in_bam} > {tx_out_bam}')
message = f"Removing duplicates from {in_bam}, saving as {out_bam}."
do.run(cmd, message)
index(out_bam, dd.get_config(data))
return out_bam
def count_alignments(in_bam, data, filter=None):
"""
    Count alignments in a BAM file passing a given filter. Valid filter
    strings are described in the sambamba documentation:
https://github.com/biod/sambamba/wiki/%5Bsambamba-view%5D-Filter-expression-syntax
"""
sambamba = config_utils.get_program("sambamba", dd.get_config(data))
num_cores = dd.get_num_cores(data)
if not filter:
filter_string = ""
message = f"Counting alignments in {in_bam}."
else:
filter_string = "--filter {filter}"
message = f"Counting alignments in {in_bam} matching {filter}."
cmd = f"{sambamba} view -c --nthreads {num_cores} -f bam {filter_string} {in_bam}"
logger.info(message)
result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return int(result.stdout.decode().strip())
```
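A minimal sketch of the seed.fraction arithmetic combined by `get_downsample_pct` and `downsample` above, using made-up read counts; `subsample_fraction` is a hypothetical helper, not part of bcbio:

```python
def subsample_fraction(total_aligned, target_counts, n_read_groups=1, seed=42):
    """Return a samtools -s style <seed>.<fraction> string, or None to skip.

    Mirrors get_downsample_pct/downsample: subsampling is skipped when it
    would keep 90% or more of the reads.
    """
    rg_target = n_read_groups * target_counts
    if total_aligned <= rg_target:
        return None
    pct = float(rg_target) / float(total_aligned)
    if pct >= 0.9:
        return None
    return "%d.%s" % (seed, ("%.3f" % pct).replace("0.", ""))

print(subsample_fraction(50000000, 10000000))  # 42.200
print(subsample_fraction(11000000, 10000000))  # None -- would keep more than 90%
```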
#### File: bcbio/bam/ref.py
```python
import collections
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.provenance import do
def fasta_idx(in_file, config=None):
"""Retrieve samtools style fasta index.
"""
fasta_index = in_file + ".fai"
if not utils.file_exists(fasta_index):
samtools = config_utils.get_program("samtools", config) if config else "samtools"
cmd = "{samtools} faidx {in_file}"
do.run(cmd.format(**locals()), "samtools faidx")
return fasta_index
def file_contigs(ref_file, config=None):
"""Iterator of reference contigs and lengths from a reference file.
"""
ContigInfo = collections.namedtuple("ContigInfo", "name size")
with open(fasta_idx(ref_file, config)) as in_handle:
for line in (l for l in in_handle if l.strip()):
name, size = line.split()[:2]
yield ContigInfo(name, int(size))
```
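For reference, a `samtools faidx` index is a tab-separated file whose first two columns are contig name and length; `file_contigs` above yields exactly those. A small illustrative parse of one hypothetical `.fai` line:

```python
import collections

ContigInfo = collections.namedtuple("ContigInfo", "name size")

def parse_fai_line(line):
    # .fai columns: NAME, LENGTH, OFFSET, LINEBASES, LINEWIDTH; keep the first two
    name, size = line.split()[:2]
    return ContigInfo(name, int(size))

print(parse_fai_line("chr1\t248956422\t112\t60\t61"))
# ContigInfo(name='chr1', size=248956422)
```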
#### File: bcbio/bed/__init__.py
```python
import pybedtools as bt
from bcbio.utils import file_exists
from bcbio import utils
def decomment(bed_file, out_file):
"""
    remove comment, browser and track lines from a BED file
"""
if file_exists(out_file):
return out_file
with utils.open_gzipsafe(bed_file) as in_handle, open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("#") or line.startswith("browser") or line.startswith("track"):
continue
else:
out_handle.write(line)
return out_file
def concat(bed_files, catted=None):
"""
recursively concat a set of BED files, returning a
sorted bedtools object of the result
"""
bed_files = [x for x in bed_files if x]
if len(bed_files) == 0:
if catted:
# move to a .bed extension for downstream tools if not already
sorted_bed = catted.sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed
else:
return catted
if not catted:
bed_files = list(bed_files)
catted = bt.BedTool(bed_files.pop())
else:
catted = catted.cat(bed_files.pop(), postmerge=False,
force_truncate=False)
return concat(bed_files, catted)
def merge(bedfiles):
"""
    given a BED file or list of BED files, merge them and return a bedtools object
"""
if isinstance(bedfiles, list):
catted = concat(bedfiles)
else:
catted = concat([bedfiles])
if catted:
        return catted.sort().merge()
else:
return catted
def minimize(bed_file):
"""
strip a BED file down to its three necessary columns: chrom start end
"""
if not bed_file:
return bed_file
else:
sorted_bed = bt.BedTool(bed_file).cut(range(3)).sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed
```
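A hedged usage sketch for the helpers above, assuming pybedtools is installed and `a.bed`/`b.bed` are hypothetical BED files on disk:

```python
# Concatenate, sort and merge two BED files, then strip to chrom/start/end.
merged = merge(["a.bed", "b.bed"])   # sorted, merged pybedtools BedTool
minimal = minimize(merged.fn)        # three-column BED, .bed extension ensured
for interval in minimal:
    print(interval.chrom, interval.start, interval.end)
```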
#### File: bcbio/cwl/main.py
```python
from bcbio.pipeline import run_info
from bcbio.cwl import create
def run(args):
"""Run a CWL preparation pipeline.
"""
dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig)
integrations = args.integrations if hasattr(args, "integrations") else {}
world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations)
create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
```
#### File: bcbio/heterogeneity/loh.py
```python
import csv
import collections
import os
import decimal
import uuid
import pandas as pd
import six
from six import StringIO
import toolz as tz
import yaml
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
# Standard sets of coordinates we always include
_COORDS = {"LOH":
{"hg38": {"HLA": ("chr6", 28510120, 33480577),
"B2M": ("chr15", 44711487, 44718877)},
"hg19": {"HLA": ("chr6", 29640000, 33120000),
"B2M": ("chr15", 45003675, 45011075)},
"GRCh37": {"HLA": ("6", 29640000, 33120000),
"B2M": ("15", 45003675, 45011075)}}}
def get_coords(data):
"""Retrieve coordinates of genes of interest for prioritization.
Can read from CIViC input data or a supplied BED file of chrom, start, end
and gene information.
"""
for category, vtypes in [("LOH", {"LOSS", "HETEROZYGOSITY"}),
("amplification", {"AMPLIFICATION"})]:
out = tz.get_in([category, dd.get_genome_build(data)], _COORDS, {})
priority_file = dd.get_svprioritize(data)
if priority_file:
if os.path.basename(priority_file).find("civic") >= 0:
for chrom, start, end, gene in _civic_regions(priority_file, vtypes, dd.get_disease(data)):
out[gene] = (chrom, start, end)
elif os.path.basename(priority_file).find(".bed") >= 0:
for line in utils.open_gzipsafe(priority_file):
parts = line.strip().split("\t")
if len(parts) >= 4:
chrom, start, end, gene = parts[:4]
out[gene] = (chrom, int(start), int(end))
yield category, out
def _matches(tocheck, target):
for t in target:
t = t.lower()
for c in tocheck:
if c.lower().find(t) >= 0:
return True
def _civic_regions(civic_file, variant_types=None, diseases=None, drugs=None):
"""Retrieve gene regions and names filtered by variant_types and diseases.
"""
if isinstance(diseases, six.string_types):
diseases = [diseases]
with utils.open_gzipsafe(civic_file) as in_handle:
reader = csv.reader(in_handle, delimiter="\t")
for chrom, start, end, info_str in reader:
info = edn_loads(info_str)
if not variant_types or _matches(info["support"]["variants"], variant_types):
if not diseases or _matches(info["support"]["diseases"], diseases):
if not drugs or _matches(info["support"]["drugs"], drugs):
yield (chrom, int(start), int(end), list(info["name"])[0])
def summary_status(call, data):
"""Retrieve status in regions of interest, along with heterogeneity metrics.
Provides output with overall purity and ploidy, along with region
specific calls.
"""
out_file = None
if call.get("vrn_file") and os.path.exists(call.get("vrn_file")):
out_file = os.path.join(os.path.dirname(call["vrn_file"]),
"%s-%s-lohsummary.yaml" % (dd.get_sample_name(data), call["variantcaller"]))
if not utils.file_uptodate(out_file, call["vrn_file"]):
out = {}
if call["variantcaller"] == "titancna":
out.update(_titancna_summary(call, data))
elif call["variantcaller"] == "purecn":
out.update(_purecn_summary(call, data))
if out:
out["description"] = dd.get_sample_name(data)
out["variantcaller"] = call["variantcaller"]
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return out_file if out_file and os.path.exists(out_file) else None
def _check_copy_number_changes(svtype, cn, minor_cn, data):
"""Check if copy number changes match the expected svtype.
"""
if svtype == "LOH" and minor_cn == 0:
return svtype
elif svtype == "amplification" and cn > dd.get_ploidy(data):
return svtype
else:
return "std"
def _to_cn(v):
return int(round(float(v)))
def _titancna_summary(call, data):
"""Summarize purity, ploidy and LOH for TitanCNA.
"""
out = {}
for svtype, coords in get_coords(data):
cur_calls = {k: collections.defaultdict(int) for k in coords.keys()}
with open(call["subclones"]) as in_handle:
header = in_handle.readline().strip().split()
for line in in_handle:
val = dict(zip(header, line.strip().split()))
start = int(val["Start_Position.bp."])
end = int(val["End_Position.bp."])
for region, cur_coords in coords.items():
if val["Chromosome"] == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]):
cur_calls[region][_check_copy_number_changes(svtype, _to_cn(val["Copy_Number"]),
_to_cn(val["MinorCN"]), data)] += 1
out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()}
with open(call["hetsummary"]) as in_handle:
vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t")))
out["purity"] = vals["purity"]
out["ploidy"] = vals["ploidy"]
return out
def _purecn_summary(call, data):
"""Summarize purity, ploidy and LOH for PureCN.
"""
out = {}
loh_calls = pd.read_csv(call["loh"])
for svtype, coords in get_coords(data):
cur_calls = {k: collections.defaultdict(int) for k in coords.keys()}
for rowid, row in loh_calls.iterrows():
_, chrom, start, end, _, cn, minor_cn = row.iloc[0:7]
if pd.isnull(cn) or pd.isnull(minor_cn):
# NA copy number calls - skip
continue
start = int(start)
end = int(end)
for region, cur_coords in coords.items():
if chrom == cur_coords[0] and are_overlapping((start, end), cur_coords[1:]):
cur_calls[region][_check_copy_number_changes(svtype, _to_cn(cn), _to_cn(minor_cn), data)] += 1
out[svtype] = {r: _merge_cn_calls(c, svtype) for r, c in cur_calls.items()}
with open(call["hetsummary"]) as in_handle:
vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","),
in_handle.readline().strip().split(",")))
out["purity"] = vals["Purity"]
out["ploidy"] = vals["Ploidy"]
return out
def _merge_cn_calls(calls, svtype):
if calls[svtype]:
return "mixed" if calls["std"] else svtype
else:
return "no"
def are_overlapping(r, s):
"""Test if two coordinates overlap.
https://stackoverflow.com/a/27182551
"""
return r[1] >= s[0] and s[1] >= r[0]
# ## EDN parser
# Thanks to https://github.com/sunng87/pyclj
# Slightly adapted to avoid external dependencies
def edn_load(fp):
decoder = CljDecoder(fp)
return decoder.decode()
def edn_loads(s):
buf = StringIO(s)
result = edn_load(buf)
buf.close()
return result
def _number(v):
if v.endswith('M'):
out = decimal.Decimal(v[:-1])
else:
try:
out = int(v)
except ValueError as e:
out = float(v)
return out
_STOP_CHARS = [" ", ",", "\n", "\r", "\t"]
_COLL_OPEN_CHARS = ["#", "[", "{", "("]
_COLL_CLOSE_CHARS = ["]", "}", ")"]
_EXTRA_NUM_CHARS = ["-", "+", ".", "e", "E", "M"]
class CljDecoder(object):
def __init__(self, fd):
self.fd = fd
self.cur_line = 1
self.cur_pos = 1
self.value_stack = []
self.terminator = None ## for collection type
def decode(self):
while True:
v = self.__read_token()
if len(self.value_stack) == 0:
return v
def __seek_back(self, size):
self.fd.seek(self.fd.tell()-size, 0)
def __read_and_back(self, size):
s = self.fd.read(size)
self.__seek_back(size)
return s
def __get_type_from_char(self, c):
"""return a tuple of type information
* type name
* a flag to indicate if it's a collection
"""
        if c.isdigit() or c == '-':
return ("number", False, None)
elif c == 't' or c == 'f': ## true/false
return ("boolean", False, None)
elif c == 'n': ## nil
return ("nil", False, None)
elif c == '\\' :
return ("char", False, None)
elif c == ':':
return ("keyword", False, None)
elif c == '"':
return ("string", False, None)
elif c == '#':
if self.__read_and_back(1) == '{':
return ("set", True, "}")
if self.__read_and_back(1) == ':':
return ("namespaced_dict", True, "}")
if self.__read_and_back(4) == 'inst':
return ("datetime", False, None)
if self.__read_and_back(4) == 'uuid':
return ("uuid", False, None)
elif c == '{':
return ("dict", True, "}")
elif c == '(':
return ("list", True, ")")
elif c == '[':
return ('list', True, "]")
return (None, False, None)
def __read_fd(self, size):
if size == 1:
c = self.fd.read(size)
if c == '\n':
self.cur_pos = 0
self.cur_line = self.cur_line + 1
return c
else:
self.cur_pos = self.cur_pos + size
cs = self.fd.read(size)
return cs
def __read_token(self):
c = self.__read_fd(1)
## skip all stop chars if necessary
while c in _STOP_CHARS:
c = self.__read_fd(1)
## raise exception when unexpected EOF found
if c == '':
raise ValueError("Unexpected EOF")
t, coll, term = self.__get_type_from_char(c)
if coll:
## move cursor
if t == "set":
## skip {
self.__read_fd(1)
namespace = None
if t == "namespaced_dict":
## skip :
self.__read_fd(1)
## get namespace
buf = []
while c != '{':
c = self.__read_fd(1)
buf.append(c)
namespace = ''.join(buf[:-1])
self.terminator = term
self.value_stack.append(([], self.terminator, t, namespace))
return None
else:
v = None ## token value
e = None ## end char
r = True ## the token contains data or not
if t == "boolean":
if c == 't':
chars = self.__read_fd(4)
if chars[:3] != 'rue':
raise ValueError('Expect true, got t%s at line %d, col %d' % (chars[:3], self.cur_line, self.cur_pos))
e = chars[-1]
v = True
else:
chars = self.__read_fd(5)
if chars[:4] != 'alse':
                        raise ValueError('Expect false, got f%s at line %d, col %d' % (chars[:4], self.cur_line, self.cur_pos))
e = chars[-1]
v = False
elif t == "char":
buf = []
                while c != self.terminator and c != "" and c not in _STOP_CHARS:
c = self.__read_fd(1)
buf.append(c)
e = c
v = ''.join(buf[:-1])
elif t == "nil":
chars = self.__read_fd(3)
if chars[:2] != 'il':
raise ValueError('Expect nil, got n%s at line %d, col %d' % (chars[:2], self.cur_line, self.cur_pos))
e = chars[-1]
v = None
elif t == "number":
buf = []
while c.isdigit() or (c in _EXTRA_NUM_CHARS):
buf.append(c)
c = self.__read_fd(1)
e = c
numstr = ''.join(buf)
v = _number(numstr)
## special case for
## [23[12]]
## this is a valid clojure form
if e in _COLL_OPEN_CHARS:
self.__seek_back(1)
elif t == "keyword":
buf = [] ##skip the leading ":"
                while c != self.terminator and c != "" and c not in _STOP_CHARS:
c = self.__read_fd(1)
buf.append(c)
e = c
v = ''.join(buf[:-1])
elif t == "string":
buf = []
cp = c = self.__read_fd(1) ## to check escaping character \
while not(c == '"' and cp != '\\'):
buf.append(c)
cp = c
c = self.__read_fd(1)
e = c
                raw = ''.join(buf)
                if isinstance(raw, bytes):  # Python 2: decode escaped bytes directly
                    v = raw.decode('unicode-escape')
                else:  # Python 3: round-trip through latin-1 to expand backslash escapes
                    v = raw.encode('latin-1', 'backslashreplace').decode('unicode-escape')
elif t == "datetime":
## skip "inst"
self.__read_fd(4)
## read next value as string
s = self.__read_token()
if not isinstance(s, six.string_types):
raise ValueError('Str expected, but got %s' % str(s))
## remove read string from the value_stack
if len(self.value_stack) > 0:
self.value_stack[-1][0].pop()
e = '"'
                import pyrfc3339  # external dependency, only needed for #inst tagged values
                v = pyrfc3339.parse(s)
elif t == "uuid":
## skip "uuid"
self.__read_fd(4)
## read next value as string
s = self.__read_token()
if not isinstance(s, six.string_types):
raise ValueError('Str expected, but got %s' % str(s))
## remove read string from the value_stack
if len(self.value_stack) > 0:
self.value_stack[-1][0].pop()
e = '"'
v = uuid.UUID(s)
else:
if c not in _COLL_CLOSE_CHARS:
raise ValueError('Unexpected char: "%s" at line %d, col %d' % (c, self.cur_line, self.cur_pos))
r = False
e = c
if e == self.terminator:
current_scope, _, container, namespace = self.value_stack.pop()
if r:
current_scope.append(v)
if container == "set":
try:
v = set(current_scope)
except TypeError:
v = tuple(current_scope)
elif container == "list":
v = current_scope
elif container in ["dict", "namespaced_dict"]:
v = {}
for i in range(0, len(current_scope), 2):
key = '%s/%s' % (namespace, current_scope[i]) if namespace else current_scope[i]
v[key] = current_scope[i+1]
r = True
if r and len(self.value_stack) > 0:
self.value_stack[-1][0].append(v)
self.terminator = self.value_stack[-1][1]
return v
```
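A small round-trip through the embedded EDN parser above; the input string is a fabricated example rather than real CIViC support data:

```python
record = edn_loads('{:name #{"TP53"} :count 2}')
print(record["name"])   # {'TP53'}
print(record["count"])  # 2
```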
#### File: bcbio/pipeline/fastq.py
```python
import os
import shutil
from bcbio import bam, broad, utils
from bcbio.bam import fastq
from bcbio.distributed import objectstore
from bcbio.pipeline import alignment
from bcbio.pipeline import datadict as dd
from bcbio.utils import file_exists, safe_makedir, splitext_plus
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.ngsalign import alignprep
def get_fastq_files(data):
"""Retrieve fastq files for the given lane, ready to process.
"""
assert "files" in data, "Did not find `files` in input; nothing to process"
ready_files = []
should_gzip = True
# Bowtie does not accept gzipped fastq
if 'bowtie' in data['reference'].keys():
should_gzip = False
for fname in data["files"]:
if fname.endswith(".bam"):
if _pipeline_needs_fastq(data["config"], data):
ready_files = convert_bam_to_fastq(fname, data["dirs"]["work"],
data, data["dirs"], data["config"])
else:
ready_files = [fname]
elif objectstore.is_remote(fname):
ready_files.append(fname)
# Trimming does quality conversion, so if not doing that, do an explicit conversion
elif not(dd.get_trim_reads(data)) and dd.get_quality_format(data) != "standard":
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "fastq_convert"))
ready_files.append(fastq.groom(fname, data, out_dir=out_dir))
else:
ready_files.append(fname)
ready_files = [x for x in ready_files if x is not None]
if should_gzip:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "fastq"))
ready_files = [_gzip_fastq(x, out_dir) for x in ready_files]
for in_file in ready_files:
if not objectstore.is_remote(in_file):
assert os.path.exists(in_file), "%s does not exist." % in_file
return ready_files
def _gzip_fastq(in_file, out_dir=None):
"""
gzip a fastq file if it is not already gzipped, handling conversion
from bzip to gzipped files
"""
if fastq.is_fastq(in_file) and not objectstore.is_remote(in_file):
if utils.is_bzipped(in_file):
return _bzip_gzip(in_file, out_dir)
elif not utils.is_gzipped(in_file):
if out_dir:
gzipped_file = os.path.join(out_dir, os.path.basename(in_file) + ".gz")
else:
gzipped_file = in_file + ".gz"
if file_exists(gzipped_file):
return gzipped_file
message = "gzipping {in_file} to {gzipped_file}.".format(
in_file=in_file, gzipped_file=gzipped_file)
with file_transaction(gzipped_file) as tx_gzipped_file:
do.run("gzip -c {in_file} > {tx_gzipped_file}".format(**locals()),
message)
return gzipped_file
return in_file
def _bzip_gzip(in_file, out_dir=None):
"""
convert from bz2 to gz
"""
if not utils.is_bzipped(in_file):
return in_file
base, _ = os.path.splitext(in_file)
if out_dir:
gzipped_file = os.path.join(out_dir, os.path.basename(base) + ".gz")
else:
gzipped_file = base + ".gz"
if (fastq.is_fastq(base) and not objectstore.is_remote(in_file)):
if file_exists(gzipped_file):
return gzipped_file
message = "gzipping {in_file} to {gzipped_file}.".format(
in_file=in_file, gzipped_file=gzipped_file)
with file_transaction(gzipped_file) as tx_gzipped_file:
do.run("bunzip2 -c {in_file} | gzip > {tx_gzipped_file}".format(**locals()), message)
return gzipped_file
return in_file
def _pipeline_needs_fastq(config, data):
"""Determine if the pipeline can proceed with a BAM file, or needs fastq conversion.
"""
aligner = config["algorithm"].get("aligner")
support_bam = aligner in alignment.metadata.get("support_bam", [])
return aligner and not support_bam
def convert_bam_to_fastq(in_file, work_dir, data, dirs, config):
"""Convert BAM input file into FASTQ files.
"""
return alignprep.prep_fastq_inputs([in_file], data)
def merge(files, out_file, config):
"""merge smartly fastq files. It recognizes paired fastq files."""
pair1 = [fastq_file[0] for fastq_file in files]
if len(files[0]) > 1:
path = splitext_plus(out_file)
pair1_out_file = path[0] + "_R1" + path[1]
pair2 = [fastq_file[1] for fastq_file in files]
pair2_out_file = path[0] + "_R2" + path[1]
_merge_list_fastqs(pair1, pair1_out_file, config)
_merge_list_fastqs(pair2, pair2_out_file, config)
return [pair1_out_file, pair2_out_file]
else:
return _merge_list_fastqs(pair1, out_file, config)
def _merge_list_fastqs(files, out_file, config):
"""merge list of fastq files into one"""
if not all(map(fastq.is_fastq, files)):
raise ValueError("Not all of the files to merge are fastq files: %s " % (files))
assert all(map(utils.file_exists, files)), ("Not all of the files to merge "
"exist: %s" % (files))
if not file_exists(out_file):
files = [_gzip_fastq(fn) for fn in files]
if len(files) == 1:
if "remove_source" in config and config["remove_source"]:
shutil.move(files[0], out_file)
else:
os.symlink(files[0], out_file)
return out_file
with file_transaction(out_file) as file_txt_out:
files_str = " ".join(list(files))
cmd = "cat {files_str} > {file_txt_out}".format(**locals())
do.run(cmd, "merge fastq files %s" % files)
return out_file
```
#### File: bcbio/pipeline/merge.py
```python
import os
import shutil
import subprocess
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, system
def combine_fastq_files(in_files, work_dir, config):
if len(in_files) == 1:
return in_files[0]
else:
cur1, cur2 = in_files[0]
out1 = os.path.join(work_dir, os.path.basename(cur1))
out2 = os.path.join(work_dir, os.path.basename(cur2)) if cur2 else None
if not os.path.exists(out1):
with open(out1, "a") as out_handle:
for (cur1, _) in in_files:
with open(cur1) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
if out2 and not os.path.exists(out2):
with open(out2, "a") as out_handle:
for (_, cur2) in in_files:
with open(cur2) as in_handle:
shutil.copyfileobj(in_handle, out_handle)
for f1, f2 in in_files:
utils.save_diskspace(f1, "fastq merged to %s" % out1, config)
if f2:
utils.save_diskspace(f2, "fastq merged to %s" % out2, config)
return out1, out2
def merge_bam_files(bam_files, work_dir, data, out_file=None, batch=None):
"""Merge multiple BAM files from a sample into a single BAM for processing.
Checks system open file limit and merges in batches if necessary to avoid
file handle limits.
"""
out_file = _merge_outfile_fname(out_file, bam_files, work_dir, batch)
if not utils.file_exists(out_file):
if len(bam_files) == 1 and bam.bam_already_sorted(bam_files[0], data["config"], "coordinate"):
with file_transaction(data, out_file) as tx_out_file:
_create_merge_filelist(bam_files, tx_out_file, data["config"])
out_file = bam_files[0]
samtools = config_utils.get_program("samtools", data["config"])
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
else:
with tx_tmpdir(data) as tmpdir:
with utils.chdir(tmpdir):
with file_transaction(data, out_file) as tx_out_file:
tx_bam_file_list = _create_merge_filelist(bam_files, tx_out_file, data["config"])
samtools = config_utils.get_program("samtools", data["config"])
resources = config_utils.get_resources("samtools", data["config"])
num_cores = dd.get_num_cores(data)
# Aim for 3.5Gb/core memory for BAM merging
num_cores = config_utils.adjust_cores_to_mb_target(
3500, resources.get("memory", "2G"), num_cores)
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
2, "decrease").upper()
if dd.get_mark_duplicates(data):
cmd = _biobambam_merge_dedup_maxcov(data)
else:
cmd = _biobambam_merge_maxcov(data)
do.run(cmd.format(**locals()), "Merge bam files to %s" % os.path.basename(out_file),
None)
do.run('{} quickcheck -v {}'.format(samtools, tx_out_file),
"Check for valid merged BAM")
do.run('{} quickcheck -v {}'.format(samtools, out_file),
"Check for valid merged BAM after transfer")
_finalize_merge(out_file, bam_files, data["config"])
bam.index(out_file, data["config"])
return out_file
def _create_merge_filelist(bam_files, base_file, config):
"""Create list of input files for merge, ensuring all files are valid.
"""
bam_file_list = "%s.list" % os.path.splitext(base_file)[0]
samtools = config_utils.get_program("samtools", config)
with open(bam_file_list, "w") as out_handle:
for f in sorted(bam_files):
do.run('{} quickcheck -v {}'.format(samtools, f),
"Ensure integrity of input merge BAM files")
out_handle.write("%s\n" % f)
return bam_file_list
def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
"""Derive correct name of BAM file based on batching.
"""
if out_file is None:
out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
if batch is not None:
base, ext = os.path.splitext(out_file)
out_file = "%s-b%s%s" % (base, batch, ext)
return out_file
def _finalize_merge(out_file, bam_files, config):
"""Handle indexes and cleanups of merged BAM and input files.
"""
# Ensure timestamps are up to date on output file and index
# Works around issues on systems with inconsistent times
for ext in ["", ".bai"]:
if os.path.exists(out_file + ext):
subprocess.check_call(["touch", out_file + ext])
for b in bam_files:
utils.save_diskspace(b, "BAM merged to %s" % out_file, config)
def _biobambam_merge_dedup_maxcov(data):
"""Combine query sorted BAM files, de-duplicate, sort and truncate to maximum coverage.
Handles split files, checking for large scale whole genome coverage where
we want to downsample to a maximum coverage.
"""
ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup")
return ("bamcat level=0 tmpfile={tx_out_file}-bammerge `cat {tx_bam_file_list}` | "
"bamsormadup threads={num_cores} "
"tmpfile={tx_out_file}-bamsormaduptmp %s > {tx_out_file}" % ds_cmd)
def _biobambam_merge_maxcov(data):
"""Combine query sorted BAM files, sort and truncate to maximum coverage.
No de-duplication.
"""
ds_cmd = bam.get_maxcov_downsample_cl(data, "bamsormadup")
return ("bammerge IL={tx_bam_file_list} tmpfile={tx_out_file}-merge %s > {tx_out_file}" % ds_cmd)
```
#### File: bcbio/pipeline/shared.py
```python
import os
from contextlib import contextmanager
import functools
import operator
import tempfile
import pybedtools
import pysam
import six
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.utils import file_exists, save_diskspace
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.provenance import do
from functools import reduce
# ## Split/Combine helpers
def combine_bam(in_files, out_file, config):
"""Parallel target to combine multiple BAM files.
"""
runner = broad.runner_from_path("picard", config)
runner.run_fn("picard_merge", in_files, out_file)
for in_file in in_files:
save_diskspace(in_file, "Merged into {0}".format(out_file), config)
bam.index(out_file, config)
return out_file
def get_noalt_contigs(data):
"""Retrieve contigs without alternatives as defined in bwa *.alts files.
If no alt files present (when we're not aligning with bwa), work around
with standard set of alts based on hg38 -- anything with HLA, _alt or
_decoy in the name.
"""
alts = set([])
alt_files = [f for f in tz.get_in(["reference", "bwa", "indexes"], data, []) if f.endswith("alt")]
if alt_files:
for alt_file in alt_files:
with open(alt_file) as in_handle:
for line in in_handle:
if not line.startswith("@"):
alts.add(line.split()[0].strip())
else:
for contig in ref.file_contigs(dd.get_ref_file(data)):
if ("_alt" in contig.name or "_decoy" in contig.name or
contig.name.startswith("HLA-") or ":" in contig.name):
alts.add(contig.name)
return [c for c in ref.file_contigs(dd.get_ref_file(data)) if c.name not in alts]
def write_nochr_reads(in_file, out_file, config):
"""Write a BAM file of reads that are not mapped on a reference chromosome.
This is useful for maintaining non-mapped reads in parallel processes
that split processing by chromosome.
"""
if not file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
samtools = config_utils.get_program("samtools", config)
cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Select unmapped reads")
return out_file
def write_noanalysis_reads(in_file, region_file, out_file, config):
"""Write a BAM file of reads in the specified region file that are not analyzed.
We want to get only reads not in analysis regions but also make use of
the BAM index to perform well on large files. The tricky part is avoiding
command line limits. There is a nice discussion on SeqAnswers:
http://seqanswers.com/forums/showthread.php?t=29538
sambamba supports intersection via an input BED file so avoids command line
length issues.
"""
if not file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bedtools = config_utils.get_program("bedtools", config)
sambamba = config_utils.get_program("sambamba", config)
cl = ("{sambamba} view -f bam -l 0 -L {region_file} {in_file} | "
"{bedtools} intersect -abam - -b {region_file} -f 1.0 -nonamecheck"
"> {tx_out_file}")
do.run(cl.format(**locals()), "Select unanalyzed reads")
return out_file
def subset_bam_by_region(in_file, region, config, out_file_base=None):
"""Subset BAM files based on specified chromosome region.
"""
if out_file_base is not None:
base, ext = os.path.splitext(out_file_base)
else:
base, ext = os.path.splitext(in_file)
out_file = "%s-subset%s%s" % (base, region, ext)
if not file_exists(out_file):
with pysam.Samfile(in_file, "rb") as in_bam:
target_tid = in_bam.gettid(region)
            assert target_tid is not None and target_tid >= 0, \
                "Did not find reference region %s in %s" % \
                (region, in_file)
with file_transaction(config, out_file) as tx_out_file:
with pysam.Samfile(tx_out_file, "wb", template=in_bam) as out_bam:
for read in in_bam:
if read.tid == target_tid:
out_bam.write(read)
return out_file
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None):
"""Subset a BED file to only have items from the specified chromosome.
"""
if out_dir is None:
out_dir = os.path.dirname(in_file)
base, ext = os.path.splitext(os.path.basename(in_file))
out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
_rewrite_bed_with_chrom(in_file, tx_out_file, chrom)
return out_file
def _rewrite_bed_with_chrom(in_file, out_file, chrom):
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("%s\t" % chrom):
out_handle.write(line)
def _subset_bed_by_region(in_file, out_file, regions, ref_file, do_merge=True):
orig_bed = pybedtools.BedTool(in_file)
region_bed = pybedtools.BedTool("\n".join(["%s\t%s\t%s" % (c, s, e) for c, s, e in regions]) + "\n",
from_string=True)
sort_kwargs = {"faidx": ref.fasta_idx(ref_file)} if ref_file else {}
if do_merge:
orig_bed.intersect(region_bed, nonamecheck=True).saveas().sort(**sort_kwargs).saveas().\
filter(lambda x: len(x) >= 1).saveas().merge().saveas(out_file)
else:
orig_bed.intersect(region_bed, nonamecheck=True).saveas().sort(**sort_kwargs).saveas().\
filter(lambda x: len(x) >= 1).saveas(out_file)
def remove_lcr_regions(orig_bed, items):
"""If configured and available, update a BED file to remove low complexity regions.
"""
lcr_bed = tz.get_in(["genome_resources", "variation", "lcr"], items[0])
if lcr_bed and os.path.exists(lcr_bed) and "lcr" in get_exclude_regions(items):
return _remove_regions(orig_bed, [lcr_bed], "nolcr", items[0])
else:
return orig_bed
def remove_polyx_regions(in_file, items):
"""Remove polyX stretches, contributing to long variant runtimes.
"""
ex_bed = tz.get_in(["genome_resources", "variation", "polyx"], items[0])
if ex_bed and os.path.exists(ex_bed):
return _remove_regions(in_file, [ex_bed], "nopolyx", items[0])
else:
return in_file
def add_highdepth_genome_exclusion(items):
"""Add exclusions to input items to avoid slow runtimes on whole genomes.
"""
out = []
for d in items:
d = utils.deepish_copy(d)
if dd.get_coverage_interval(d) == "genome":
e = dd.get_exclude_regions(d)
if "highdepth" not in e:
e.append("highdepth")
d = dd.set_exclude_regions(d, e)
out.append(d)
return out
def remove_highdepth_regions(in_file, items):
"""Remove high depth regions from a BED file for analyzing a set of calls.
Tries to avoid spurious errors and slow run times in collapsed repeat regions.
Also adds ENCODE blacklist regions which capture additional collapsed repeats
around centromeres.
"""
encode_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"], items[0])
if encode_bed and os.path.exists(encode_bed):
return _remove_regions(in_file, [encode_bed], "glimit", items[0])
else:
return in_file
def _remove_regions(in_file, remove_beds, ext, data):
"""Subtract a list of BED files from an input BED.
General approach handling none, one and more remove_beds.
"""
from bcbio.variation import bedutils
out_file = "%s-%s.bed" % (utils.splitext_plus(in_file)[0], ext)
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with bedtools_tmpdir(data):
if len(remove_beds) == 0:
to_remove = None
elif len(remove_beds) == 1:
to_remove = remove_beds[0]
else:
to_remove = "%s-all.bed" % utils.splitext_plus(tx_out_file)[0]
with open(to_remove, "w") as out_handle:
for b in remove_beds:
with utils.open_gzipsafe(b) as in_handle:
for line in in_handle:
parts = line.split("\t")
out_handle.write("\t".join(parts[:4]).rstrip() + "\n")
if utils.file_exists(to_remove):
to_remove = bedutils.sort_merge(to_remove, data)
if to_remove and utils.file_exists(to_remove):
cmd = "bedtools subtract -nonamecheck -a {in_file} -b {to_remove} > {tx_out_file}"
do.run(cmd.format(**locals()), "Remove problematic regions: %s" % ext)
else:
utils.symlink_plus(in_file, out_file)
return out_file
@contextmanager
def bedtools_tmpdir(data):
with tx_tmpdir(data) as tmpdir:
orig_tmpdir = tempfile.gettempdir()
pybedtools.set_tempdir(tmpdir)
yield
if orig_tmpdir and os.path.exists(orig_tmpdir):
pybedtools.set_tempdir(orig_tmpdir)
else:
tempfile.tempdir = None
def get_exclude_regions(items):
"""Retrieve regions to exclude from a set of items.
Includes back compatibility for older custom ways of specifying different
exclusions.
"""
def _get_sample_excludes(d):
excludes = dd.get_exclude_regions(d)
# back compatible
if tz.get_in(("config", "algorithm", "remove_lcr"), d, False):
excludes.append("lcr")
return excludes
out = reduce(operator.add, [_get_sample_excludes(d) for d in items])
return sorted(list(set(out)))
def remove_exclude_regions(f):
"""Remove regions to exclude based on configuration: polyA, LCR, high depth.
"""
exclude_fns = {"lcr": remove_lcr_regions, "highdepth": remove_highdepth_regions,
"polyx": remove_polyx_regions}
@functools.wraps(f)
def wrapper(variant_regions, region, out_file, items=None, do_merge=True, data=None):
region_bed = f(variant_regions, region, out_file, items, do_merge, data)
if region_bed and isinstance(region_bed, six.string_types) and os.path.exists(region_bed) and items:
for e in get_exclude_regions(items):
if e in exclude_fns:
region_bed = exclude_fns[e](region_bed, items)
return region_bed
return wrapper
def to_multiregion(region):
"""Convert a single region or multiple region specification into multiregion list.
If a single region (chrom, start, end), returns [(chrom, start, end)]
otherwise returns multiregion.
"""
assert isinstance(region, (list, tuple)), region
if isinstance(region[0], (list, tuple)):
return region
else:
assert len(region) == 3
return [tuple(region)]
@remove_exclude_regions
def subset_variant_regions(variant_regions, region, out_file, items=None, do_merge=True, data=None):
"""Return BED file subset by a specified chromosome region.
variant_regions is a BED file, region is a chromosome name or tuple
of (name, start, end) for a genomic region.
"""
if region is None:
return variant_regions
elif variant_regions is None:
return region
elif not isinstance(region, (list, tuple)) and region.find(":") > 0:
raise ValueError("Partial chromosome regions not supported")
else:
merge_text = "-unmerged" if not do_merge else ""
subset_file = "{0}".format(utils.splitext_plus(out_file)[0])
subset_file += "%s-regions.bed" % (merge_text)
if not os.path.exists(subset_file):
data = items[0] if items else data
with file_transaction(data, subset_file) as tx_subset_file:
if isinstance(region, (list, tuple)):
_subset_bed_by_region(variant_regions, tx_subset_file, to_multiregion(region),
dd.get_ref_file(data), do_merge=do_merge)
else:
_rewrite_bed_with_chrom(variant_regions, tx_subset_file, region)
if os.path.getsize(subset_file) == 0:
return region
else:
return subset_file
```
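A quick illustration of `to_multiregion` above with invented coordinates:

```python
print(to_multiregion(("chr1", 1000, 2000)))
# [('chr1', 1000, 2000)]
print(to_multiregion([("chr1", 1000, 2000), ("chr2", 5, 50)]))
# [('chr1', 1000, 2000), ('chr2', 5, 50)]
```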
#### File: bcbio/qc/damage.py
```python
import os
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import vcfutils
from bcbio.qc import variant
def run(bam_file, data, out_dir):
out = {}
vcinfo = variant.get_active_vcinfo(data, use_ensemble=False)
dkfzbiasfilter = config_utils.get_program("dkfzbiasfilter_summarize.py", data)
if vcinfo and vcfutils.vcf_has_variants(vcinfo["vrn_file"]):
out_file = os.path.join(utils.safe_makedir(out_dir),
"%s-damage.yaml" % (dd.get_sample_name(data)))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [dkfzbiasfilter, "--sample=%s" % dd.get_sample_name(data),
"--outfile=%s" % tx_out_file, vcinfo["vrn_file"]]
do.run(cmd, "Summarize damage filtering")
if utils.file_exists(out_file):
out["base"] = out_file
return out
```
#### File: bcbio/qc/qsignature.py
```python
import os
import shutil
import subprocess
import xml.etree.ElementTree as ET
import pysam
import toolz as tz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.provenance import do
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
def run(bam_file, data, out_dir):
""" Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
    :param data: (list) list containing all of the dictionaries
        for this sample
:param out_dir: (str) path of the output
:returns: (string) output normalized vcf file
"""
qsig = config_utils.get_program("qsignature", data["config"])
res_qsig = config_utils.get_resources("qsignature", data["config"])
jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
if not qsig:
logger.info("There is no qsignature tool. Skipping...")
return None
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
utils.safe_makedir(out_dir)
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in(['genome_build'], data))
return None
if mixup_check == "qsignature_full":
down_bam = bam_file
else:
down_bam = _slice_bam_chr21(bam_file, data)
position = _slice_vcf_chr21(position, out_dir)
out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_bam} ")
if not os.path.exists(out_file):
file_qsign_out = "{0}.qsig.vcf".format(down_bam)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data))
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return out_file
return None
def summary(*samples):
"""Run SignatureCompareRelatedSimple module from qsignature tool.
    Creates a matrix of pairwise comparisons among samples. The
    function will not run if the output already exists.
:param samples: list with only one element containing all samples information
:returns: (dict) with the path of the output to be joined to summary
"""
warnings, similar = [], []
qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
if not qsig:
return [[]]
res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"])
jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
work_dir = samples[0][0]["dirs"]["work"]
count = 0
for data in samples:
data = data[0]
vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data)
if vcf:
count += 1
vcf_name = dd.get_sample_name(data) + ".qsig.vcf"
out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
if not os.path.lexists(os.path.join(out_dir, vcf_name)):
os.symlink(vcf, os.path.join(out_dir, vcf_name))
if count > 0:
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
out_file = os.path.join(qc_out_dir, "qsignature.xml")
out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
if not os.path.exists(out_file):
with file_transaction(samples[0][0], out_file) as file_txt_out:
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureCompareRelatedSimple "
"-log {log} -dir {out_dir} "
"-o {file_txt_out} ")
do.run(base_cmd.format(**locals()), "qsignature score calculation")
error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
out_warn_file, samples[0][0])
return [{'total samples': count,
'similar samples pairs': len(similar),
'warnings samples pairs': len(warnings),
'error samples': list(error),
'out_dir': qc_out_dir}]
else:
return []
def get_qsig_multiqc_files(*samples):
work_dir = samples[0][0]["dirs"]["work"]
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
return [os.path.join(qc_out_dir, "qsignature.ma")]
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
:returns: (list) with samples that could be duplicated
"""
name = {}
error, warnings, similar = set(), set(), set()
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = ET.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar
def _slice_bam_chr21(in_bam, data):
"""
    return a BAM file restricted to chromosome 21
"""
sambamba = config_utils.get_program("sambamba", data["config"])
out_file = "%s-chr%s" % os.path.splitext(in_bam)
if not utils.file_exists(out_file):
bam.index(in_bam, data['config'])
with pysam.Samfile(in_bam, "rb") as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
chromosome = "21"
if "chr21" in bam_contigs:
chromosome = "chr21"
with file_transaction(data, out_file) as tx_out_file:
cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return out_file
def _slice_vcf_chr21(vcf_file, out_dir):
"""
Slice chr21 of qsignature SNPs to reduce computation time
"""
tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf")
if not utils.file_exists(tmp_file):
cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return tmp_file
```
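The thresholds in `_parse_qsignature_output` above classify pairwise scores into same/replicate/related buckets. A standalone sketch of that logic; `classify_score` is a hypothetical helper, not part of bcbio:

```python
def classify_score(score, full_check=False):
    """Classify a qsignature comparison score using the same cutoffs as above."""
    same, replicate, related = (0, 0.01, 0.061) if full_check else (0, 0.1, 0.18)
    if score == same:
        return "error: same sample read twice"
    elif score < replicate:
        return "warning: similar/replicate samples"
    elif score < related:
        return "note: related samples"
    return "ok"

print(classify_score(0.05))   # warning: similar/replicate samples
print(classify_score(0.15))   # note: related samples
print(classify_score(0.05, full_check=True))  # note: related samples
```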
#### File: bcbio/qc/samtools.py
```python
import os
import toolz as tz
from bcbio.distributed.transaction import file_transaction
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
def run(_, data, out_dir=None):
"""Run samtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file, idxstats_file = _get_stats_files(data, out_dir)
samtools = config_utils.get_program("samtools", data["config"])
bam_file = dd.get_align_bam(data) or dd.get_work_bam(data)
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, stats_file) as tx_out_file:
cores = dd.get_num_cores(data)
cmd = "{samtools} stats -@ {cores} {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools stats", data)
if not utils.file_exists(idxstats_file):
utils.safe_makedir(out_dir)
with file_transaction(data, idxstats_file) as tx_out_file:
cmd = "{samtools} idxstats {bam_file}"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "samtools index stats", data)
out = {"base": idxstats_file, "secondary": [stats_file]}
out["metrics"] = _parse_samtools_stats(stats_file)
return out
def run_and_save(data):
"""Run QC, saving file outputs in data dictionary.
"""
run(None, data)
stats_file, idxstats_file = _get_stats_files(data)
data = tz.update_in(data, ["depth", "samtools", "stats"], lambda x: stats_file)
data = tz.update_in(data, ["depth", "samtools", "idxstats"], lambda x: idxstats_file)
return data
def _get_stats_files(data, out_dir=None):
"""Retrieve stats files from pre-existing dictionary or filesystem.
"""
if not out_dir:
out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
"qc", dd.get_sample_name(data), "samtools"))
stats_file = tz.get_in(["depth", "samtools", "stats"], data)
idxstats_file = tz.get_in(["depth", "samtools", "idxstats"], data)
if not stats_file:
stats_file = os.path.join(out_dir, "%s.txt" % dd.get_sample_name(data))
if not idxstats_file:
idxstats_file = os.path.join(out_dir, "%s-idxstats.txt" % dd.get_sample_name(data))
return stats_file, idxstats_file
def _parse_samtools_stats(stats_file):
out = {}
want = {"raw total sequences": "Total_reads",
"reads mapped": "Mapped_reads",
"reads mapped and paired": "Mapped_paired_reads",
"reads duplicated": "Duplicates",
"insert size average": "Average_insert_size",
"average length": "Average_read_length",
}
with open(stats_file) as in_handle:
for line in in_handle:
if not line.startswith("SN"):
continue
parts = line.split("\t")
metric, stat_str = parts[1:3]
metric = metric.replace(":", "").strip()
if metric in want:
stat = float(stat_str.strip())
out[want[metric]] = stat
# Ensure we have zero values for any metrics not present in stats output
for metric in want.values():
if metric not in out:
out[metric] = 0
return out
```
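A short hedged example of the `SN` summary lines emitted by `samtools stats` and how `_parse_samtools_stats` maps them; the file name and counts are made up for illustration.

```python
from bcbio.qc.samtools import _parse_samtools_stats

# Hypothetical excerpt of `samtools stats` output written to a stats file.
stats_text = ("SN\traw total sequences:\t1000000\n"
              "SN\treads mapped:\t950000\n"
              "SN\tinsert size average:\t312.5\n")
with open("example-stats.txt", "w") as handle:
    handle.write(stats_text)

print(_parse_samtools_stats("example-stats.txt"))
# Expected mapping (metrics missing from the file default to 0):
# {'Total_reads': 1000000.0, 'Mapped_reads': 950000.0, 'Average_insert_size': 312.5,
#  'Mapped_paired_reads': 0, 'Duplicates': 0, 'Average_read_length': 0}
```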
#### File: bcbio/rnaseq/arriba.py
```python
import os
from bcbio.heterogeneity import chromhacks
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
SUPPORTED_BUILDS = ("hg38", "GRCh37", "hg19")
def run_arriba(data):
build = dd.get_genome_build(data)
if build not in SUPPORTED_BUILDS:
logger.info(f"{build} not supported for arriba, skipping.")
return data
arriba_dir = os.path.join(dd.get_work_dir(data), "arriba", dd.get_sample_name(data))
utils.safe_makedir(arriba_dir)
bam_file = dd.get_work_bam(data)
ref_file = dd.get_ref_file(data)
gtf = dd.get_gtf_file(data)
arriba = config_utils.get_program("arriba", data)
fusion_file = os.path.join(arriba_dir, "fusions.tsv")
discarded_fusion_file = os.path.join(arriba_dir, "fusions.discarded.tsv")
blacklist_file = get_arriba_blacklist_file(data)
contigs = get_contigs(data)
contig_list = ",".join(contigs)
if utils.file_exists(fusion_file):
data["arriba"] = {"fusions": fusion_file, "discarded": discarded_fusion_file}
        return data
with file_transaction(fusion_file) as tx_fusion_file, \
file_transaction(discarded_fusion_file) as tx_discarded_fusion_file:
cmd = (f"{arriba} -x {bam_file} -g {gtf} -a {ref_file} -o {tx_fusion_file} "
f"-O {tx_discarded_fusion_file} "
f"-i {contig_list} ")
if blacklist_file:
logger.info(f"arriba blacklist file found, running blacklisting with {blacklist_file}.")
cmd += (f"-b {blacklist_file} ")
else:
logger.info("arriba blacklist file not found, disabling blacklist filtering.")
cmd += (f"-f blacklist ")
if dd.get_known_fusions(data):
cmd += (f"-k {dd.get_known_fusions(data)} ")
message = f"Running arriba on {dd.get_sample_name(data)}."
do.run(cmd, message)
data["arriba"] = {"fusions": fusion_file, "discarded": discarded_fusion_file}
    return data
def get_arriba_blacklist_file(data):
arriba_dir = os.path.join(os.path.dirname(dd.get_gtf_file(data)),
"fusion-blacklist")
blacklist = os.path.join(arriba_dir, "arriba-blacklist.tsv.gz")
if utils.file_exists(blacklist):
return blacklist
else:
return None
def get_contigs(data):
contigs = [x.name for x in shared.get_noalt_contigs(data)]
keep = [x for x in contigs if chromhacks.is_autosomal(x) or chromhacks.is_sex(x)]
return keep
```
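A brief sketch of how downstream code would pick up the arriba outputs attached to the sample dictionary; `data` here stands for a fully populated bcbio sample returned by `run_arriba` and is assumed rather than constructed.

```python
# After run_arriba(data) on a supported genome build:
fusions_tsv = data.get("arriba", {}).get("fusions")        # .../arriba/<sample>/fusions.tsv
discarded_tsv = data.get("arriba", {}).get("discarded")    # .../arriba/<sample>/fusions.discarded.tsv
if fusions_tsv:
    with open(fusions_tsv) as handle:
        header = handle.readline().rstrip("\n").split("\t")
```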
#### File: bcbio/server/background.py
```python
from __future__ import print_function
import subprocess
import tornado.ioloop
import time
import fcntl
import functools
import os
class GenericSubprocess(object):
    def __init__(self, timeout=-1, **popen_args):
self.args = dict()
self.args["stdout"] = subprocess.PIPE
self.args["stderr"] = subprocess.PIPE
self.args["close_fds"] = True
self.args.update(popen_args)
self.ioloop = None
self.expiration = None
self.pipe = None
self.timeout = timeout
self.streams = []
self.has_timed_out = False
def start(self):
"""Spawn the task.
Throws RuntimeError if the task was already started."""
        if self.pipe is not None:
            raise RuntimeError("Cannot start task twice")
        self.ioloop = tornado.ioloop.IOLoop.instance()
        if self.timeout > 0:
            self.expiration = self.ioloop.add_timeout(time.time() + self.timeout, self.on_timeout)
        self.pipe = subprocess.Popen(**self.args)
        self.streams = [(self.pipe.stdout.fileno(), []),
                        (self.pipe.stderr.fileno(), [])]
        for fd, d in self.streams:
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
            self.ioloop.add_handler(fd,
                                    self.stat,
                                    self.ioloop.READ | self.ioloop.ERROR)
def on_timeout(self):
self.has_timed_out = True
self.cancel()
    def cancel(self):
        """Cancel task execution.
        Sends SIGKILL to the child process."""
        try:
            self.pipe.kill()
        except Exception:
            pass
    def stat(self, *args):
        """Check process completion and consume pending I/O data."""
        self.pipe.poll()
        if self.pipe.returncode is not None:
            # cleanup handlers and timeouts
            if self.expiration is not None:
                self.ioloop.remove_timeout(self.expiration)
            for fd, dest in self.streams:
                self.ioloop.remove_handler(fd)
            # schedule callback (first try to read all pending data)
            self.ioloop.add_callback(self.on_finish)
for fd, dest in self.streams:
while True:
try:
data = os.read(fd, 4096)
if len(data) == 0:
break
print(data.rstrip())
except:
break
@property
def stdout(self):
return self.get_output(0)
@property
def stderr(self):
return self.get_output(1)
@property
def status(self):
return self.pipe.returncode
    def get_output(self, index):
return "".join(self.streams[index][1])
def on_finish(self):
        raise NotImplementedError()
class Subprocess(GenericSubprocess):
    """Subprocess task that runs a callback on completion.
    Arguments:
        callback: method to be called after completion. This method should take 4 arguments: statuscode(int), stdout(str), stderr(str), has_timed_out(boolean)
        timeout: wall time allocated for the process to complete. After this expires Task.cancel is called. A negative timeout value means no limit is set
    The task is not started until start is called. The process will then be spawned using subprocess.Popen(**popen_args). The stdout and stderr are always set to subprocess.PIPE.
    """
    def __init__(self, callback, *args, **kwargs):
        """Create new instance.
        Arguments:
            callback: method to be called after completion. This method should take 4 arguments: statuscode(int), stdout(str), stderr(str), has_timed_out(boolean)
            timeout: wall time allocated for the process to complete. After this expires Task.cancel is called. A negative timeout value means no limit is set
        The task is not started until start is called. The process will then be spawned using subprocess.Popen(**popen_args). The stdout and stderr are always set to subprocess.PIPE.
        """
self.callback = callback
self.done_callback = False
GenericSubprocess.__init__(self, *args, **kwargs)
    def on_finish(self):
        if not self.done_callback:
            self.done_callback = True
            # prevent calling the callback twice
            self.ioloop.add_callback(functools.partial(self.callback, self.status, self.stdout, self.stderr, self.has_timed_out))
```
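A minimal usage sketch for the `Subprocess` wrapper above, assuming it is importable as `bcbio.server.background.Subprocess`; the command and timeout are illustrative only.

```python
import tornado.ioloop
from bcbio.server.background import Subprocess

def on_done(status, stdout, stderr, has_timed_out):
    # Called on the IOLoop once the child process exits (or is killed on timeout).
    print("exit status:", status, "timed out:", has_timed_out)
    tornado.ioloop.IOLoop.instance().stop()

# popen_args are passed straight to subprocess.Popen; stdout/stderr are always piped.
proc = Subprocess(on_done, timeout=30, args=["echo", "hello"])
proc.start()
tornado.ioloop.IOLoop.instance().start()
```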
#### File: bcbio/server/main.py
```python
import tornado.web
import tornado.ioloop
from bcbio.server import run
def start(args):
"""Run server with provided command line arguments.
"""
application = tornado.web.Application([(r"/run", run.get_handler(args)),
(r"/status", run.StatusHandler)])
application.runmonitor = RunMonitor()
application.listen(args.port)
tornado.ioloop.IOLoop.instance().start()
class RunMonitor:
"""Track current runs and provide status.
"""
def __init__(self):
self._running = {}
def set_status(self, run_id, status):
self._running[run_id] = status
def get_status(self, run_id):
return self._running.get(run_id, "not-running")
def add_subparser(subparsers):
"""Add command line arguments as server subparser.
"""
parser = subparsers.add_parser("server", help="Run a bcbio-nextgen server allowing remote job execution.")
parser.add_argument("-c", "--config", help=("Global YAML configuration file specifying system details."
"Defaults to installed bcbio_system.yaml"))
parser.add_argument("-p", "--port", help="Port to listen on (default 8080)",
default=8080, type=int)
parser.add_argument("-n", "--cores", help="Cores to use when processing locally when not requested (default 1)",
default=1, type=int)
parser.add_argument("-d", "--biodata_dir", help="Directory with biological data",
default="/mnt/biodata", type=str)
return parser
```
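A hedged sketch of wiring `add_subparser` into a top-level argparse parser, mirroring how the bcbio CLI would dispatch to `start`; the argument values are examples only.

```python
import argparse
from bcbio.server.main import add_subparser

parser = argparse.ArgumentParser(description="bcbio-nextgen")
subparsers = parser.add_subparsers()
add_subparser(subparsers)

# Example invocation: listen on port 8081 with 4 local cores.
args = parser.parse_args(["server", "--port", "8081", "--cores", "4"])
# start(args) would then create the tornado application and block on the IOLoop.
```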
#### File: bcbio/structural/purple.py
```python
import csv
import os
import re
import shutil
import subprocess
import toolz as tz
from bcbio import broad, utils
from bcbio.log import logger
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import titancna
from bcbio.variation import vcfutils
def run(items):
paired = vcfutils.get_paired(items)
if not paired or not paired.normal_name:
logger.info("Skipping PURPLE; need tumor/normal somatic calls in batch: %s" %
" ".join([dd.get_sample_name(d) for d in items]))
return items
work_dir = _sv_workdir(paired.tumor_data)
from bcbio import heterogeneity
vrn_files = heterogeneity.get_variants(paired.tumor_data, include_germline=False)
het_file = _amber_het_file("pon", vrn_files, work_dir, paired)
depth_file = _run_cobalt(paired, work_dir)
purple_out = _run_purple(paired, het_file, depth_file, vrn_files, work_dir)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append(purple_out)
out.append(paired.tumor_data)
return out
def _get_jvm_opts(out_file, data):
"""Retrieve Java options, adjusting memory for available cores.
"""
resources = config_utils.get_resources("purple", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3500m"])
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
{"direction": "increase",
"maximum": "30000M",
"magnitude": dd.get_cores(data)}}})
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return jvm_opts
def _run_purple(paired, het_file, depth_file, vrn_files, work_dir):
"""Run PURPLE with pre-calculated AMBER and COBALT compatible inputs.
"""
purple_dir = utils.safe_makedir(os.path.join(work_dir, "purple"))
out_file = os.path.join(purple_dir, "%s.purple.cnv" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["PURPLE"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
["-amber", os.path.dirname(het_file), "-baf", het_file,
"-cobalt", os.path.dirname(depth_file),
"-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"],
"-output_dir", os.path.dirname(tx_out_file),
"-ref_genome", "hg38" if dd.get_genome_build(paired.tumor_data) == "hg38" else "hg19",
"-run_dir", work_dir,
"-threads", dd.get_num_cores(paired.tumor_data),
"-tumor_sample", dd.get_sample_name(paired.tumor_data),
"-ref_sample", dd.get_sample_name(paired.normal_data)]
if vrn_files:
cmd += ["-somatic_vcf", vrn_files[0]["vrn_file"]]
# Avoid X11 display errors when writing plots
cmd = "unset DISPLAY && %s" % " ".join([str(x) for x in cmd])
do.run(cmd, "PURPLE: purity and ploidy estimation")
for f in os.listdir(os.path.dirname(tx_out_file)):
if f != os.path.basename(tx_out_file):
shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
os.path.join(purple_dir, f))
out_file_export = os.path.join(purple_dir, "%s-purple-cnv.tsv" % (dd.get_sample_name(paired.tumor_data)))
if not utils.file_exists(out_file_export):
utils.symlink_plus(out_file, out_file_export)
out = {"variantcaller": "purple", "call_file": out_file_export,
"vrn_file": titancna.to_vcf(out_file_export, "PURPLE", _get_header, _export_to_vcf,
paired.tumor_data),
"plot": {}, "metrics": {}}
for name, ext in [("copy_number", "copyNumber"), ("minor_allele", "minor_allele"), ("variant", "variant")]:
plot_file = os.path.join(purple_dir, "plot", "%s.%s.png" % (dd.get_sample_name(paired.tumor_data), ext))
if os.path.exists(plot_file):
out["plot"][name] = plot_file
purity_file = os.path.join(purple_dir, "%s.purple.purity" % dd.get_sample_name(paired.tumor_data))
with open(purity_file) as in_handle:
header = in_handle.readline().replace("#", "").split("\t")
vals = in_handle.readline().split("\t")
for h, v in zip(header, vals):
try:
v = float(v)
except ValueError:
pass
out["metrics"][h] = v
return out
def _normalize_baf(baf):
"""Provide normalized BAF in the same manner as Amber, relative to het.
    https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/main/java/com/hartwig/hmftools/common/amber/AmberBAF.java#L16
"""
if baf is None:
baf = 0.0
return 0.5 + abs(baf - 0.5)
def _counts_to_amber(t_vals, n_vals):
"""Converts a line of CollectAllelicCounts into AMBER line.
"""
t_depth = int(t_vals["REF_COUNT"]) + int(t_vals["ALT_COUNT"])
n_depth = int(n_vals["REF_COUNT"]) + int(n_vals["ALT_COUNT"])
if n_depth > 0 and t_depth > 0:
t_baf = float(t_vals["ALT_COUNT"]) / float(t_depth)
n_baf = float(n_vals["ALT_COUNT"]) / float(n_depth)
return [t_vals["CONTIG"], t_vals["POSITION"], t_baf, _normalize_baf(t_baf), t_depth,
n_baf, _normalize_baf(n_baf), n_depth]
def _count_files_to_amber(tumor_counts, normal_counts, work_dir, data):
"""Converts tumor and normal counts from GATK CollectAllelicCounts into Amber format.
"""
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(data))
if not utils.file_uptodate(out_file, tumor_counts):
with file_transaction(data, out_file) as tx_out_file:
with open(tumor_counts) as tumor_handle:
with open(normal_counts) as normal_handle:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, delimiter="\t")
writer.writerow(["Chromosome", "Position", "TumorBAF", "TumorModifiedBAF", "TumorDepth",
"NormalBAF", "NormalModifiedBAF", "NormalDepth"])
header = None
for t, n in zip(tumor_handle, normal_handle):
if header is None and t.startswith("CONTIG"):
header = t.strip().split()
elif header is not None:
t_vals = dict(zip(header, t.strip().split()))
n_vals = dict(zip(header, n.strip().split()))
amber_line = _counts_to_amber(t_vals, n_vals)
if amber_line:
writer.writerow(amber_line)
return out_file
class AmberWriter:
def __init__(self, out_handle):
self.writer = csv.writer(out_handle, delimiter="\t")
def write_header(self):
self.writer.writerow(["Chromosome", "Position", "TumorBAF", "TumorModifiedBAF", "TumorDepth",
"NormalBAF", "NormalModifiedBAF", "NormalDepth"])
def write_row(self, rec, stats):
if stats["normal"]["freq"] is not None and stats["normal"]["depth"] is not None:
self.writer.writerow([rec.chrom, rec.pos,
stats["tumor"]["freq"], _normalize_baf(stats["tumor"]["freq"]),
stats["tumor"]["depth"],
stats["normal"]["freq"], _normalize_baf(stats["normal"]["freq"]),
stats["normal"]["depth"]])
def _amber_het_file(method, vrn_files, work_dir, paired):
"""Create file of BAFs in normal heterozygous positions compatible with AMBER.
Two available methods:
- pon -- Use panel of normals with likely heterozygous sites.
- variants -- Use pre-existing variant calls, filtered to likely heterozygotes.
https://github.com/hartwigmedical/hmftools/tree/master/amber
https://github.com/hartwigmedical/hmftools/blob/637e3db1a1a995f4daefe2d0a1511a5bdadbeb05/hmf-common/src/test/resources/amber/new.amber.baf
"""
assert vrn_files, "Did not find compatible variant calling files for PURPLE inputs"
from bcbio.heterogeneity import bubbletree
if method == "variants":
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
prep_file = bubbletree.prep_vrn_file(vrn_files[0]["vrn_file"], vrn_files[0]["variantcaller"],
work_dir, paired, AmberWriter)
utils.symlink_plus(prep_file, out_file)
pcf_file = out_file + ".pcf"
if not utils.file_exists(pcf_file):
with file_transaction(paired.tumor_data, pcf_file) as tx_out_file:
r_file = os.path.join(os.path.dirname(tx_out_file), "bafSegmentation.R")
with open(r_file, "w") as out_handle:
out_handle.write(_amber_seg_script)
cmd = "%s && %s --vanilla %s %s %s" % (utils.get_R_exports(), utils.Rscript_cmd(), r_file,
out_file, pcf_file)
do.run(cmd, "PURPLE: AMBER baf segmentation")
else:
assert method == "pon"
out_file = _run_amber(paired, work_dir)
return out_file
def _run_amber(paired, work_dir, lenient=False):
"""AMBER: calculate allele frequencies at likely heterozygous sites.
lenient flag allows amber runs on small test sets.
"""
amber_dir = utils.safe_makedir(os.path.join(work_dir, "amber"))
out_file = os.path.join(amber_dir, "%s.amber.baf" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file) or not utils.file_exists(out_file + ".pcf"):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
key = "germline_het_pon"
het_bed = tz.get_in(["genome_resources", "variation", key], paired.tumor_data)
cmd = ["AMBER"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
["-threads", dd.get_num_cores(paired.tumor_data),
"-tumor", dd.get_sample_name(paired.tumor_data),
"-tumor_bam", dd.get_align_bam(paired.tumor_data),
"-reference", dd.get_sample_name(paired.normal_data),
"-reference_bam", dd.get_align_bam(paired.normal_data),
"-ref_genome", dd.get_ref_file(paired.tumor_data),
"-bed", het_bed,
"-output_dir", os.path.dirname(tx_out_file)]
if lenient:
cmd += ["-max_het_af_percent", "1.0"]
try:
do.run(cmd, "PURPLE: AMBER baf generation")
except subprocess.CalledProcessError as msg:
if not lenient and _amber_allowed_errors(str(msg)):
return _run_amber(paired, work_dir, True)
for f in os.listdir(os.path.dirname(tx_out_file)):
if f != os.path.basename(tx_out_file):
shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
os.path.join(amber_dir, f))
return out_file
def _amber_allowed_errors(msg):
allowed = ["R execution failed. Unable to complete segmentation."]
return any([len(re.findall(m, msg)) > 0 for m in allowed])
# BAF segmentation with copynumber from AMBER
# https://github.com/hartwigmedical/hmftools/blob/master/amber/src/main/resources/r/bafSegmentation.R
_amber_seg_script = """
# Parse the arguments
args <- commandArgs(trailing=T)
bafFile <- args[1]
pcfFile <- args[2]
library(copynumber)
baf <- read.table(bafFile, header=TRUE)
chromosomeLevels = levels(baf$Chromosome)
chromosomePrefix = ""
if (any(grepl("chr", chromosomeLevels, ignore.case = T))) {
chromosomePrefix = substr(chromosomeLevels[1], 1, 3)
}
baf <- baf[,c("Chromosome","Position","TumorModifiedBAF")]
baf$Chromosome <- gsub(chromosomePrefix, "", baf$Chromosome, ignore.case = T)
baf.seg<-pcf(baf,verbose=FALSE,gamma=100,kmin=1)
baf.seg$chrom = paste0(chromosomePrefix, baf.seg$chrom)
write.table(baf.seg, file = pcfFile, row.names = F, sep = "\t", quote = F)
"""
def _run_cobalt(paired, work_dir):
"""Run Cobalt for counting read depth across genomic windows.
    PURPLE requires even 1000bp windows, so we use COBALT's integrated counting
    directly rather than converting from CNVkit calculations. If this approach
    proves useful, it should be moved upstream so it is available to other tools
    as an input for comparison.
https://github.com/hartwigmedical/hmftools/tree/master/count-bam-lines
"""
cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
cmd = ["COBALT"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
["-reference", paired.normal_name, "-reference_bam", paired.normal_bam,
"-tumor", paired.tumor_name, "-tumor_bam", paired.tumor_bam,
"-threads", dd.get_num_cores(paired.tumor_data),
"-output_dir", os.path.dirname(tx_out_file),
"-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"]]
cmd = "%s && %s" % (utils.get_R_exports(), " ".join([str(x) for x in cmd]))
do.run(cmd, "PURPLE: COBALT read depth normalization")
for f in os.listdir(os.path.dirname(tx_out_file)):
if f != os.path.basename(tx_out_file):
shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
os.path.join(cobalt_dir, f))
return out_file
def _cobalt_ratio_file(paired, work_dir):
"""Convert CNVkit binning counts into cobalt ratio output.
This contains read counts plus normalization for GC, from section 7.2
"Determine read depth ratios for tumor and reference genomes"
https://www.biorxiv.org/content/biorxiv/early/2018/09/20/415133.full.pdf
Since CNVkit cnr files already have GC bias correction, we re-center
the existing log2 ratios to be around 1, rather than zero, which matches
the cobalt expectations.
    XXX This does not appear to be a worthwhile direction since PURPLE requires
    even 1000bp binning. We leave it here as a starting point for future work
    and use COBALT directly instead.
"""
cobalt_dir = utils.safe_makedir(os.path.join(work_dir, "cobalt"))
out_file = os.path.join(cobalt_dir, "%s.cobalt" % dd.get_sample_name(paired.tumor_data))
if not utils.file_exists(out_file):
cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, delimiter="\t")
writer.writerow(["Chromosome", "Position", "ReferenceReadCount", "TumorReadCount",
"ReferenceGCRatio", "TumorGCRatio", "ReferenceGCDiploidRatio"])
raise NotImplementedError
return out_file
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "purple"))
# ## VCF output
def _get_header(in_handle):
return in_handle.readline().replace("#", "").strip().split(), in_handle
def _export_to_vcf(cur):
"""Convert PURPLE custom output into VCF.
"""
if float(cur["copyNumber"]) > 2.0:
svtype = "DUP"
elif float(cur["copyNumber"]) < 2.0:
svtype = "DEL"
else:
svtype = None
if svtype:
info = ["END=%s" % cur["end"], "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])),
"SVTYPE=%s" % svtype, "CN=%s" % cur["copyNumber"], "PROBES=%s" % cur["depthWindowCount"]]
return [cur["chromosome"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".",
";".join(info), "GT", "0/1"]
```
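A small worked example of the AMBER conversion helpers above; the allele-count values are hypothetical.

```python
from bcbio.structural.purple import _counts_to_amber, _normalize_baf

# Hypothetical tumor/normal rows from GATK CollectAllelicCounts at the same position.
t_vals = {"CONTIG": "1", "POSITION": "1000", "REF_COUNT": "30", "ALT_COUNT": "20"}
n_vals = {"CONTIG": "1", "POSITION": "1000", "REF_COUNT": "25", "ALT_COUNT": "25"}

print(_normalize_baf(0.4))              # 0.6 -- reflected around the 0.5 heterozygous point
print(_counts_to_amber(t_vals, n_vals))
# ['1', '1000', 0.4, 0.6, 50, 0.5, 0.5, 50]
```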
#### File: bcbio/structural/validate.py
```python
import csv
import os
import six
import toolz as tz
import numpy as np
import pandas as pd
import pybedtools
from bcbio.log import logger
from bcbio import utils
from bcbio.bam import ref
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import convert
from bcbio.distributed.transaction import file_transaction
from bcbio.variation import vcfutils, ploidy, validateplot
from bcbio.pipeline import config_utils
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
sns = utils.LazyImport("seaborn")
# -- VCF based validation
def _evaluate_vcf(calls, truth_vcf, work_dir, data):
out_file = os.path.join(work_dir, os.path.join("%s-sv-validate.csv" % dd.get_sample_name(data)))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
for call in calls:
detail_dir = utils.safe_makedir(os.path.join(work_dir, call["variantcaller"]))
if call.get("vrn_file"):
for stats in _validate_caller_vcf(call["vrn_file"], truth_vcf, dd.get_sample_callable(data),
call["variantcaller"], detail_dir, data):
writer.writerow(stats)
return out_file
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
"""Validate a caller VCF against truth within callable regions using SURVIVOR.
    Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
"""
stats = _calculate_comparison_stats(truth_vcf)
call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data),
stats, work_dir, data)
truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
"%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
def _comparison_stats_from_merge(in_file, stats, svcaller, data):
"""Extract true/false positive/negatives from a merged SURIVOR VCF.
"""
truth_stats = {"tp": [], "fn": [], "fp": []}
samples = ["truth" if x.endswith("-truth") else "eval" for x in vcfutils.get_samples(in_file)]
with open(in_file) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
supp_vec_str = [x for x in call[7].split(";") if x.startswith("SUPP_VEC=")][0]
_, supp_vec = supp_vec_str.split("=")
calls = dict(zip(samples, [int(x) for x in supp_vec]))
if calls["truth"] and calls["eval"]:
metric = "tp"
elif calls["truth"]:
metric = "fn"
else:
metric = "fp"
truth_stats[metric].append(_summarize_call(call))
return _to_csv(truth_stats, stats, dd.get_sample_name(data), svcaller)
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
"""Perform a merge of two callsets using SURVIVOR,
"""
out_file = os.path.join(work_dir, "eval-merge.vcf")
if not utils.file_uptodate(out_file, call_vcf):
in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_call_vcf):
with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_truth_vcf):
with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
in_list_file = os.path.join(work_dir, "eval-inputs.txt")
with open(in_list_file, "w") as out_handle:
out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
with file_transaction(data, out_file) as tx_out_file:
cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
return out_file
def _to_csv(truth_stats, stats, sample, svcaller):
out = []
for metric, vals in truth_stats.items():
for svtype in sorted(list(stats["svtypes"])):
count = len([x for x in vals if x["svtype"] == svtype])
out.append([sample, svcaller, svtype, metric, count])
for start, end in stats["ranges"]:
count = len([x for x in vals if (x["svtype"] == svtype
and x["size"] >= start and x["size"] < end)])
out.append([sample, svcaller, "%s_%s-%s" % (svtype, start, end), metric, count])
return out
def _calculate_comparison_stats(truth_vcf):
"""Identify calls to validate from the input truth VCF.
"""
# Avoid very small events for average calculations
min_stat_size = 50
min_median_size = 250
sizes = []
svtypes = set([])
with utils.open_gzipsafe(truth_vcf) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
stats = _summarize_call(call)
if stats["size"] > min_stat_size:
sizes.append(stats["size"])
svtypes.add(stats["svtype"])
pct10 = int(np.percentile(sizes, 10))
pct25 = int(np.percentile(sizes, 25))
pct50 = int(np.percentile(sizes, 50))
pct75 = int(np.percentile(sizes, 75))
ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50),
(pct50, pct75), (pct75, max(sizes))]
ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))]
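    # ranges_detailed/ranges_split are kept for reference but not returned;
    # an empty "ranges" list disables size-stratified counts in _to_csv.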
return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05),
"svtypes": svtypes, "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
"ranges": []}
def _get_start_end(parts, index=7):
"""Retrieve start and end for a VCF record, skips BNDs without END coords
"""
start = parts[1]
end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")]
if end:
end = end[0]
return start, end
return None, None
def _summarize_call(parts):
"""Provide summary metrics on size and svtype for a SV call.
"""
svtype = [x.split("=")[1] for x in parts[7].split(";") if x.startswith("SVTYPE=")]
svtype = svtype[0] if svtype else ""
start, end = _get_start_end(parts)
return {"svtype": svtype, "size": int(end) - int(start)}
def _prep_vcf(in_file, region_bed, sample, new_sample, stats, work_dir, data):
"""Prepare VCF for SV validation:
- Subset to passing variants
- Subset to genotyped variants -- removes reference and no calls
- Selects and names samples
- Subset to callable regions
- Remove larger annotations which slow down VCF processing
"""
in_file = vcfutils.bgzip_and_index(in_file, data, remove_orig=False)
out_file = os.path.join(work_dir, "%s-vprep.vcf.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
callable_bed = _prep_callable_bed(region_bed, work_dir, stats, data)
with file_transaction(data, out_file) as tx_out_file:
ann_remove = _get_anns_to_remove(in_file)
ann_str = " | bcftools annotate -x {ann_remove}" if ann_remove else ""
cmd = ("bcftools view -T {callable_bed} -f 'PASS,.' --min-ac '1:nref' -s {sample} {in_file} "
+ ann_str +
r"| sed 's|\t{sample}|\t{new_sample}|' "
"| bgzip -c > {out_file}")
do.run(cmd.format(**locals()), "Create SV validation VCF for %s" % new_sample)
return vcfutils.bgzip_and_index(out_file, data["config"])
def _prep_callable_bed(in_file, work_dir, stats, data):
"""Sort and merge callable BED regions to prevent SV double counting
"""
out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
gsort = config_utils.get_program("gsort", data)
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
fai_file = ref.fasta_idx(dd.get_ref_file(data))
cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | "
"bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare SV callable BED regions")
return vcfutils.bgzip_and_index(out_file, data["config"])
def _get_anns_to_remove(in_file):
"""Find larger annotations, if present in VCF, that slow down processing.
"""
to_remove = ["ANN", "LOF"]
to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove])
cur_remove = []
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
break
elif line.startswith(to_remove_str):
cur_id = line.split("ID=")[-1].split(",")[0]
cur_remove.append("INFO/%s" % cur_id)
return ",".join(cur_remove)
# -- BED based evaluation
EVENT_SIZES = [(100, 450), (450, 2000), (2000, 4000), (4000, 20000), (20000, 60000),
(60000, int(1e6))]
def _stat_str(x, n):
if n > 0:
val = float(x) / float(n) * 100.0
return {"label": "%.1f%% (%s / %s)" % (val, x, n), "val": val}
else:
return {"label": "", "val": 0}
def cnv_to_event(name, data):
"""Convert a CNV to an event name.
"""
cur_ploidy = ploidy.get_ploidy([data])
if name.startswith("cnv"):
num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
if num < cur_ploidy:
return "DEL"
elif num > cur_ploidy:
return "DUP"
else:
return name
else:
return name
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
"""Compare a ensemble results for a caller against a specific caller and SV type.
"""
def cnv_matches(name):
return cnv_to_event(name, data) == svtype
def is_breakend(name):
return name.startswith("BND")
def in_size_range(max_buffer=0):
def _work(feat):
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
size = feat.end - feat.start
return size >= max([0, minf - buffer]) and size < maxf + buffer
return _work
def is_caller_svtype(feat):
for name in feat.name.split(","):
if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
and (caller == "sv-ensemble" or name.endswith(caller))):
return True
return False
minf, maxf = size_range
efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
etotal = efeats.count()
ttotal = tfeats.count()
match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
return {"sensitivity": _stat_str(match, ttotal),
"precision": _stat_str(match, etotal)}
def _evaluate_multi(calls, truth_svtypes, work_dir, data):
base = os.path.join(work_dir, "%s-sv-validate" % (dd.get_sample_name(data)))
out_file = base + ".csv"
df_file = base + "-df.csv"
if any((not utils.file_uptodate(out_file, x["vrn_file"])
or not utils.file_uptodate(df_file, x["vrn_file"])) for x in calls):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(df_file, "w") as df_out_handle:
writer = csv.writer(out_handle)
dfwriter = csv.writer(df_out_handle)
writer.writerow(["svtype", "size", "caller", "sensitivity", "precision"])
dfwriter.writerow(["svtype", "size", "caller", "metric", "value", "label"])
for svtype, truth in truth_svtypes.items():
for size in EVENT_SIZES:
str_size = "%s-%s" % size
for call in calls:
call_bed = convert.to_bed(call, dd.get_sample_name(data), work_dir, calls, data)
if utils.file_exists(call_bed):
evalout = _evaluate_one(call["variantcaller"], svtype, size, call_bed,
truth, data)
writer.writerow([svtype, str_size, call["variantcaller"],
evalout["sensitivity"]["label"], evalout["precision"]["label"]])
for metric in ["sensitivity", "precision"]:
dfwriter.writerow([svtype, str_size, call["variantcaller"], metric,
evalout[metric]["val"], evalout[metric]["label"]])
return out_file, df_file
def _plot_evaluation(df_csv):
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
mpl.use('Agg', force=True)
df = pd.read_csv(df_csv).fillna("0%")
out = {}
for event in df["svtype"].unique():
out[event] = _plot_evaluation_event(df_csv, event)
return out
def _plot_evaluation_event(df_csv, svtype):
"""Provide plot of evaluation metrics for an SV event, stratified by event size.
"""
titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications",
"INS": "Insertions"}
out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
sns.set(style='white')
if not utils.file_uptodate(out_file, df_csv):
metrics = ["sensitivity", "precision"]
df = pd.read_csv(df_csv).fillna("0%")
df = df[(df["svtype"] == svtype)]
event_sizes = _find_events_to_include(df, EVENT_SIZES)
fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
if len(event_sizes) == 1:
axs = [axs]
callers = sorted(df["caller"].unique())
if "sv-ensemble" in callers:
callers.remove("sv-ensemble")
callers.append("sv-ensemble")
for i, size in enumerate(event_sizes):
size_label = "%s to %sbp" % size
size = "%s-%s" % size
for j, metric in enumerate(metrics):
ax = axs[i][j]
ax.get_xaxis().set_ticks([])
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0, 125.0)
if i == 0:
ax.set_title(metric, size=12, y=1.2)
vals, labels = _get_plot_val_labels(df, size, metric, callers)
ax.barh(range(1,len(vals)+1), vals)
if j == 0:
ax.tick_params(axis='y', which='major', labelsize=8)
ax.locator_params(axis="y", tight=True)
ax.set_yticks(range(1,len(callers)+1,1))
ax.set_yticklabels(callers, va="center")
ax.text(100, len(callers)+1, size_label, fontsize=10)
else:
ax.get_yaxis().set_ticks([])
for ai, (val, label) in enumerate(zip(vals, labels)):
ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7)
if svtype in titles:
fig.text(0.025, 0.95, titles[svtype], size=14)
fig.set_size_inches(7, len(event_sizes) + 1)
fig.savefig(out_file)
return out_file
def _find_events_to_include(df, event_sizes):
out = []
for size in event_sizes:
str_size = "%s-%s" % size
curdf = df[(df["size"] == str_size) & (df["metric"] == "sensitivity")]
for val in list(curdf["label"]):
if val != "0%":
out.append(size)
break
return out
def _get_plot_val_labels(df, size, metric, callers):
curdf = df[(df["size"] == size) & (df["metric"] == metric)]
vals, labels = [], []
for caller in callers:
row = curdf[curdf["caller"] == caller]
val = list(row["value"])[0]
if val == 0:
val = 0.1
vals.append(val)
labels.append(list(row["label"])[0])
return vals, labels
# -- general functionality
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
if isinstance(truth_sets, dict):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
else:
assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets
val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data)
title = "%s structural variants" % dd.get_sample_name(data)
summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None}
return data
if __name__ == "__main__":
#_, df_csv = _evaluate_multi(["lumpy", "delly", "wham", "sv-ensemble"],
# {"DEL": "synthetic_challenge_set3_tumor_20pctmasked_truth_sv_DEL.bed"},
# "syn3-tumor-ensemble-filter.bed", "sv_exclude.bed")
#_, df_csv = _evaluate_multi(["lumpy", "delly", "cn_mops", "sv-ensemble"],
# {"DEL": "NA12878.50X.ldgp.molpb_val.20140508.bed"},
# "NA12878-ensemble.bed", "LCR.bed.gz")
import sys
_plot_evaluation(sys.argv[1])
```
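A short worked example of the record summarization used during SURVIVOR-based validation; the VCF fields shown are hypothetical.

```python
from bcbio.structural.validate import _get_start_end, _summarize_call

# Minimal tab-split VCF record: a 1.5kb deletion with SVTYPE and END in the INFO field.
record = ["1", "1000", ".", "N", "<DEL>", ".", "PASS",
          "SVTYPE=DEL;END=2500;SUPP_VEC=11", "GT", "0/1", "0/1"]

print(_get_start_end(record))   # ('1000', '2500')
print(_summarize_call(record))  # {'svtype': 'DEL', 'size': 1500}
```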
#### File: bcbio/upload/filesystem.py
```python
import os
import shutil
from bcbio import utils
from bcbio.log import logger
from bcbio.upload import shared
def update_file(finfo, sample_info, config, pass_uptodate=False):
"""Update the file in local filesystem storage.
"""
storage_dir = utils.safe_makedir(_get_storage_dir(finfo, config))
if finfo.get("type") == "directory":
return _copy_finfo_directory(finfo, storage_dir)
else:
return _copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
def get_upload_path(finfo, sample_info, config):
""""Dry" update the file: only return the upload path
"""
try:
storage_dir = _get_storage_dir(finfo, config)
except ValueError:
return None
if finfo.get("type") == "directory":
return _get_dir_upload_path(finfo, storage_dir)
else:
return _get_file_upload_path(finfo, storage_dir)
def _get_storage_dir(finfo, config):
# skip if we have no directory to upload to
if "dir" not in config:
raise ValueError("Expect `dir` in upload specification: "
"http://bcbio-nextgen.readthedocs.io/en/latest/contents/configuration.html#upload")
if "run" in finfo:
storage_dir = os.path.join(config["dir"], finfo["run"])
elif "sample" in finfo:
storage_dir = os.path.join(config["dir"], finfo["sample"])
else:
raise ValueError("Unexpected input file information: %s" % finfo)
if "dir" in finfo:
storage_dir = os.path.join(storage_dir, finfo["dir"])
return storage_dir
def _get_file_upload_path(finfo, storage_dir):
if "sample" in finfo and "ext" in finfo and "type" in finfo:
out_file = os.path.join(storage_dir, "%s-%s%s%s" % (finfo["sample"], finfo["ext"],
"-" if (".txt" in finfo["type"]) else ".",
finfo["type"]))
elif "batch" in finfo and "ext" in finfo and "type" in finfo:
out_file = os.path.join(storage_dir, "%s-%s%s%s" % (finfo["batch"], finfo["ext"],
"-" if (".txt" in finfo["type"]) else ".",
finfo["type"]))
else:
out_file = os.path.join(storage_dir, os.path.basename(finfo["path"]))
return os.path.abspath(out_file)
def _get_dir_upload_path(finfo, storage_dir):
return os.path.abspath(os.path.join(storage_dir, finfo["ext"]))
def _copy_finfo(finfo, storage_dir, pass_uptodate=False):
"""Copy a file into the output storage directory.
"""
out_file = _get_file_upload_path(finfo, storage_dir)
if not shared.up_to_date(out_file, finfo):
logger.info("Storing in local filesystem: %s" % out_file)
shutil.copy(finfo["path"], out_file)
return out_file
if pass_uptodate:
return out_file
def _copy_finfo_directory(finfo, out_dir):
"""Copy a directory into the final output directory.
"""
out_dir = _get_dir_upload_path(finfo, out_dir)
if not shared.up_to_date(out_dir, finfo):
logger.info("Storing directory in local filesystem: %s" % out_dir)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.copytree(finfo["path"], out_dir)
for tmpdir in ["tx", "tmp"]:
if os.path.exists(os.path.join(out_dir, tmpdir)):
shutil.rmtree(os.path.join(out_dir, tmpdir))
os.utime(out_dir, None)
return out_dir
```
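A quick sketch of how an upload path is derived from a file-info dictionary; the sample name, extension, and storage directory are hypothetical.

```python
from bcbio.upload.filesystem import _get_file_upload_path

finfo = {"sample": "NA12878", "ext": "ready", "type": "bam", "path": "/work/NA12878.bam"}
print(_get_file_upload_path(finfo, "/storage/run1"))
# /storage/run1/NA12878-ready.bam

# Without sample/batch metadata the original basename is kept.
print(_get_file_upload_path({"path": "/work/report.pdf"}, "/storage/run1"))
# /storage/run1/report.pdf
```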
#### File: bcbio/variation/naming.py
```python
import os
import requests
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction
from bcbio.variation import vcfutils
# ## Cached results
GMAP = {}
# read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_ensembl2UCSC.txt")
GMAP["hg19"] = {'GL000219.1': 'chrUn_gl000219', 'GL000192.1':
'chr1_gl000192_random', 'GL000236.1': 'chrUn_gl000236', 'GL000211.1':
'chrUn_gl000211', 'GL000234.1': 'chrUn_gl000234', '20': 'chr20', '21': 'chr21',
'22': 'chr22', 'GL000196.1': 'chr8_gl000196_random', 'GL000213.1':
'chrUn_gl000213', 'GL000205.1': 'chr17_gl000205_random', '4': 'chr4',
'GL000222.1': 'chrUn_gl000222', 'GL000215.1': 'chrUn_gl000215', '8': 'chr8',
'GL000232.1': 'chrUn_gl000232', 'GL000242.1': 'chrUn_gl000242', 'GL000244.1':
'chrUn_gl000244', 'GL000223.1': 'chrUn_gl000223', 'GL000229.1':
'chrUn_gl000229', 'GL000240.1': 'chrUn_gl000240', 'X': 'chrX', 'GL000202.1':
'chr11_gl000202_random', 'GL000217.1': 'chrUn_gl000217', 'GL000200.1':
'chr9_gl000200_random', 'GL000230.1': 'chrUn_gl000230', 'GL000206.1':
'chr17_gl000206_random', 'HSCHR6_MHC_QBL': 'chr6_qbl_hap6', 'HSCHR6_MHC_MANN':
'chr6_mann_hap4', 'GL000237.1': 'chrUn_gl000237', 'GL000204.1':
'chr17_gl000204_random', 'GL000235.1': 'chrUn_gl000235', 'HSCHR6_MHC_APD':
'chr6_apd_hap1', 'HSCHR6_MHC_COX': 'chr6_cox_hap2', '3': 'chr3', '7': 'chr7',
'GL000233.1': 'chrUn_gl000233', 'GL000221.1': 'chrUn_gl000221', 'GL000220.1':
'chrUn_gl000220', 'GL000245.1': 'chrUn_gl000245', 'GL000228.1':
'chrUn_gl000228', 'GL000231.1': 'chrUn_gl000231', 'MT': 'chrM',
'HSCHR6_MHC_SSTO': 'chr6_ssto_hap7', 'GL000238.1': 'chrUn_gl000238',
'GL000195.1': 'chr7_gl000195_random', 'GL000249.1': 'chrUn_gl000249', '2':
'chr2', '6': 'chr6', 'GL000247.1': 'chrUn_gl000247', 'GL000199.1':
'chr9_gl000199_random', 'HSCHR6_MHC_DBB': 'chr6_dbb_hap3', 'GL000246.1':
'chrUn_gl000246', 'GL000225.1': 'chrUn_gl000225', 'HSCHR4_1': 'chr4_ctg9_hap1',
'GL000227.1': 'chrUn_gl000227', '11': 'chr11', '10': 'chr10', '13': 'chr13',
'12': 'chr12', '15': 'chr15', '14': 'chr14', '17': 'chr17', '16': 'chr16', '19':
'chr19', '18': 'chr18', 'GL000193.1': 'chr4_gl000193_random', 'GL000210.1':
'chr21_gl000210_random', 'GL000239.1': 'chrUn_gl000239', 'GL000191.1':
'chr1_gl000191_random', 'HSCHR17_1': 'chr17_ctg5_hap1', 'GL000194.1':
'chr4_gl000194_random', 'GL000212.1': 'chrUn_gl000212', 'GL000248.1':
'chrUn_gl000248', 'GL000197.1': 'chr8_gl000197_random', '1': 'chr1', '5':
'chr5', 'GL000208.1': 'chr19_gl000208_random', '9': 'chr9', 'GL000214.1':
'chrUn_gl000214', 'GL000224.1': 'chrUn_gl000224', 'GL000243.1':
'chrUn_gl000243', 'HSCHR6_MHC_MCF': 'chr6_mcf_hap5', 'GL000209.1':
'chr19_gl000209_random', 'GL000203.1': 'chr17_gl000203_random', 'GL000226.1':
'chrUn_gl000226', 'GL000241.1': 'chrUn_gl000241', 'Y': 'chrY', 'GL000201.1':
'chr9_gl000201_random', 'GL000198.1': 'chr9_gl000198_random', 'GL000216.1':
'chrUn_gl000216', 'GL000218.1': 'chrUn_gl000218', 'GL000207.1':
'chr18_gl000207_random'}
#read_mapping("https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/GRCh37_UCSC2ensembl.txt")
GMAP["GRCh37"] = {'chr19_gl000208_random': 'GL000208.1',
'chr21_gl000210_random': 'GL000210.1', 'chr6_apd_hap1': 'HSCHR6_MHC_APD',
'chr13': '13', 'chr12': '12', 'chr11': '11', 'chr10': '10', 'chr17': '17',
'chr16': '16', 'chr15': '15', 'chr14': '14', 'chr19': '19', 'chr18': '18',
'chr9_gl000198_random': 'GL000198.1', 'chrUn_gl000239': 'GL000239.1',
'chrUn_gl000238': 'GL000238.1', 'chrUn_gl000233': 'GL000233.1',
'chrUn_gl000232': 'GL000232.1', 'chrUn_gl000231': 'GL000231.1',
'chrUn_gl000230': 'GL000230.1', 'chrUn_gl000237': 'GL000237.1',
'chrUn_gl000236': 'GL000236.1', 'chrUn_gl000235': 'GL000235.1',
'chrUn_gl000234': 'GL000234.1', 'chr6_qbl_hap6': 'HSCHR6_MHC_QBL',
'chr11_gl000202_random': 'GL000202.1', 'chr17_gl000206_random': 'GL000206.1',
'chr6_cox_hap2': 'HSCHR6_MHC_COX', 'chr4_gl000193_random': 'GL000193.1',
'chrUn_gl000248': 'GL000248.1', 'chrUn_gl000249': 'GL000249.1',
'chrUn_gl000246': 'GL000246.1', 'chrUn_gl000247': 'GL000247.1',
'chrUn_gl000244': 'GL000244.1', 'chrUn_gl000245': 'GL000245.1',
'chrUn_gl000242': 'GL000242.1', 'chrUn_gl000243': 'GL000243.1',
'chrUn_gl000240': 'GL000240.1', 'chrUn_gl000241': 'GL000241.1',
'chr17_gl000204_random': 'GL000204.1', 'chr17_ctg5_hap1': 'HSCHR17_1',
'chr17_gl000205_random': 'GL000205.1', 'chr9_gl000199_random': 'GL000199.1',
'chr9_gl000201_random': 'GL000201.1', 'chr8': '8', 'chr6_ssto_hap7':
'HSCHR6_MHC_SSTO', 'chr8_gl000197_random': 'GL000197.1', 'chr6_dbb_hap3':
'HSCHR6_MHC_DBB', 'chr7_gl000195_random': 'GL000195.1', 'chr1_gl000191_random':
'GL000191.1', 'chr4_ctg9_hap1': 'HSCHR4_1', 'chr3': '3', 'chr2': '2', 'chr1':
'1', 'chr17_gl000203_random': 'GL000203.1', 'chrUn_gl000225': 'GL000225.1',
'chrY': 'Y', 'chrX': 'X', 'chr9_gl000200_random': 'GL000200.1', 'chr9': '9',
'chrM': 'MT', 'chr8_gl000196_random': 'GL000196.1', 'chr6_mann_hap4':
'HSCHR6_MHC_MANN', 'chrUn_gl000211': 'GL000211.1', 'chrUn_gl000213':
'GL000213.1', 'chrUn_gl000212': 'GL000212.1', 'chrUn_gl000215': 'GL000215.1',
'chrUn_gl000214': 'GL000214.1', 'chrUn_gl000217': 'GL000217.1',
'chrUn_gl000216': 'GL000216.1', 'chrUn_gl000219': 'GL000219.1',
'chrUn_gl000218': 'GL000218.1', 'chr19_gl000209_random': 'GL000209.1', 'chr22':
'22', 'chr20': '20', 'chr21': '21', 'chr6_mcf_hap5': 'HSCHR6_MHC_MCF', 'chr7':
'7', 'chr6': '6', 'chr5': '5', 'chr4': '4', 'chrUn_gl000228': 'GL000228.1',
'chrUn_gl000229': 'GL000229.1', 'chr1_gl000192_random': 'GL000192.1',
'chrUn_gl000224': 'GL000224.1', 'chr4_gl000194_random': 'GL000194.1',
'chrUn_gl000226': 'GL000226.1', 'chrUn_gl000227': 'GL000227.1',
'chrUn_gl000220': 'GL000220.1', 'chrUn_gl000221': 'GL000221.1',
'chrUn_gl000222': 'GL000222.1', 'chrUn_gl000223': 'GL000223.1',
'chr18_gl000207_random': 'GL000207.1'}
def handle_synonyms(in_file, ref_file, genome_build, work_dir, data):
"""Potentially handle remapping synonymous chromosome names between builds.
Handles tab delimited file formats like BED and VCF where the contig
is in the first column.
"""
if genome_build in GMAP and ref_file:
mappings = GMAP[genome_build]
contigs = set([c.name for c in ref.file_contigs(ref_file)])
out_file = os.path.join(work_dir, "%s-fixed_contigs%s" % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_exists(out_file):
if out_file.endswith(".gz"):
out_file = out_file.replace(".gz", "")
needs_bgzip = True
else:
needs_bgzip = False
checked_file = "%s.checked" % utils.splitext_plus(out_file)[0]
if not _matches_contigs(in_file, contigs, checked_file):
with file_transaction(data, out_file) as tx_out_file:
_write_newname_file(in_file, tx_out_file, mappings)
if needs_bgzip:
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
return in_file
def _write_newname_file(in_file, out_file, mappings):
"""Re-write an input file with contigs matching the correct reference.
"""
with utils.open_gzipsafe(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("#"):
out_handle.write(line)
else:
parts = line.split("\t")
new_contig = mappings.get(parts[0])
if new_contig:
parts[0] = new_contig
out_handle.write("\t".join(parts))
def _matches_contigs(in_file, contigs, checked_file):
"""Check if the contigs in the input file match the defined contigs in the reference genome.
"""
tocheck_contigs = 2
if utils.file_exists(checked_file):
with open(checked_file) as in_handle:
return in_handle.read().strip() == "match"
else:
with utils.open_gzipsafe(in_file) as in_handle:
to_check = set([])
for line in in_handle:
if not line.startswith("#"):
to_check.add(line.split()[0])
if len(to_check) >= tocheck_contigs:
break
with open(checked_file, "w") as out_handle:
if any([c not in contigs for c in to_check]):
out_handle.write("different")
return False
else:
out_handle.write("match")
return True
# ## Retrieval of mappings
def read_mapping(url):
mappings = {}
for line in requests.get(url).text.split("\n"):
parts = line.strip().split()
if len(parts) == 2:
first, second = parts
mappings[str(first)] = str(second)
return mappings
```
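A hedged sketch showing how an Ensembl/GRCh37-named BED file would be rewritten for an hg19 reference using the cached mapping above; the file names and regions are hypothetical.

```python
from bcbio.variation.naming import GMAP, _write_newname_file

# Rewrite a hypothetical GRCh37-named BED file so its contigs match hg19.
with open("regions-grch37.bed", "w") as handle:
    handle.write("1\t100\t200\nMT\t5\t50\n")
_write_newname_file("regions-grch37.bed", "regions-hg19.bed", GMAP["hg19"])
print(open("regions-hg19.bed").read())
# chr1    100     200
# chrM    5       50
```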
#### File: bcbio/variation/scalpel.py
```python
from __future__ import print_function
import os
import shutil
try:
import vcf
except ImportError:
vcf = None
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, vcfutils
from bcbio.variation.vcfutils import get_paired_bams, is_paired_analysis, bgzip_and_index
import six
def _scalpel_bed_file_opts(items, config, out_file, region, tmp_path):
variant_regions = bedutils.population_variant_regions(items)
target = shared.subset_variant_regions(variant_regions, region, out_file, items)
if target:
if isinstance(target, six.string_types) and os.path.isfile(target):
target_bed = target
else:
target_bed = os.path.join(tmp_path, "tmp.bed")
if not utils.file_exists(target_bed):
with file_transaction(config, target_bed) as tx_tmp_bed:
if not isinstance(region, (list, tuple)):
message = ("Region must be a tuple - something odd just happened")
raise ValueError(message)
chrom, start, end = region
with open(tx_tmp_bed, "w") as out_handle:
print("%s\t%s\t%s" % (chrom, start, end), file=out_handle)
if any(dd.get_coverage_interval(x) == "genome" for x in items):
target_bed = shared.remove_lcr_regions(target_bed, items)
return ["--bed", target_bed]
else:
return []
def _scalpel_options_from_config(items, config, out_file, region, tmp_path):
opts = []
# output vcf, report only variants within bed regions
opts += ["--format", "vcf", "--intarget"]
# Improve sensitivity in low coverage regions
opts += ["--covthr 3", "--lowcov 1"]
# Avoid oversampling in repeat regions
opts += ["--pathlimit", "10000"]
opts += _scalpel_bed_file_opts(items, config, out_file, region, tmp_path)
resources = config_utils.get_resources("scalpel", config)
if resources.get("options"):
opts += resources["options"]
if "--outratio" not in " ".join(opts):
# add minimum reportable allele frequency, for which Scalpel defaults to 5
# but other somatic tools in bcbio default to 10
min_af = float(utils.get_in(config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
opts += ["--outratio", str(min_af)]
return opts
def is_installed(config):
"""Check for scalpel installation on machine.
"""
try:
config_utils.get_program("scalpel-discovery", config)
return True
except config_utils.CmdNotFound:
return False
def run_scalpel(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run Scalpel indel calling, either paired tumor/normal or germline calling.
"""
if region is None:
message = ("A region must be provided for Scalpel")
raise ValueError(message)
if is_paired_analysis(align_bams, items):
call_file = _run_scalpel_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
call_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _run_scalpel_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect indels with Scalpel.
Single sample mode.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
if len(align_bams) > 1:
message = ("Scalpel does not currently support batch calling!")
raise ValueError(message)
input_bams = " ".join("%s" % x for x in align_bams)
tmp_path = "%s-scalpel-work" % utils.splitext_plus(out_file)[0]
tx_tmp_path = "%s-scalpel-work" % utils.splitext_plus(tx_out_file)[0]
if os.path.exists(tmp_path):
utils.remove_safe(tmp_path)
opts = " ".join(_scalpel_options_from_config(items, config, out_file, region, tmp_path))
opts += " --dir %s" % tx_tmp_path
min_cov = "3" # minimum coverage
opts += " --mincov %s" % min_cov
perl_exports = utils.get_perl_exports(os.path.dirname(tx_out_file))
cmd = ("{perl_exports} && "
"scalpel-discovery --single {opts} --ref {ref_file} --bam {input_bams} ")
do.run(cmd.format(**locals()), "Genotyping with Scalpel", {})
shutil.move(tx_tmp_path, tmp_path)
# parse produced variant file further
scalpel_tmp_file = bgzip_and_index(os.path.join(tmp_path, "variants.indel.vcf"), config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
bcftools_cmd_chi2 = get_scalpel_bcftools_filter_expression("chi2", config)
sample_name_str = items[0]["name"][1]
fix_ambig = vcfutils.fix_ambiguous_cl()
add_contig = vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file)
cl2 = ("{bcftools_cmd_chi2} {scalpel_tmp_file} | "
r"sed 's/FORMAT\tsample\(_name\)\{{0,1\}}/FORMAT\t{sample_name_str}/g' "
"| {fix_ambig} | vcfallelicprimitives -t DECOMPOSED --keep-geno | vcffixup - | vcfstreamsort "
"| {add_contig} {compress_cmd} > {tx_out_file}")
do.run(cl2.format(**locals()), "Finalising Scalpel variants", {})
return out_file
def _run_scalpel_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect indels with Scalpel.
This is used for paired tumor / normal samples.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
ann_file = _run_scalpel_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return ann_file
vcfstreamsort = config_utils.get_program("vcfstreamsort", config)
perl_exports = utils.get_perl_exports(os.path.dirname(tx_out_file))
tmp_path = "%s-scalpel-work" % utils.splitext_plus(out_file)[0]
db_file = os.path.join(tmp_path, "main", "somatic.db")
if not os.path.exists(db_file + ".dir"):
if os.path.exists(tmp_path):
utils.remove_safe(tmp_path)
opts = " ".join(_scalpel_options_from_config(items, config, out_file, region, tmp_path))
opts += " --ref {}".format(ref_file)
opts += " --dir %s" % tmp_path
                # calling
cl = ("{perl_exports} && "
"scalpel-discovery --somatic {opts} --tumor {paired.tumor_bam} --normal {paired.normal_bam}")
do.run(cl.format(**locals()), "Genotyping paired variants with Scalpel", {})
# filtering to adjust input parameters
bed_opts = " ".join(_scalpel_bed_file_opts(items, config, out_file, region, tmp_path))
use_defaults = True
if use_defaults:
scalpel_tmp_file = os.path.join(tmp_path, "main/somatic.indel.vcf")
# Uses default filters but can tweak min-alt-count-tumor and min-phred-fisher
# to swap precision for sensitivity
else:
scalpel_tmp_file = os.path.join(tmp_path, "main/somatic-indel-filter.vcf.gz")
with file_transaction(config, scalpel_tmp_file) as tx_indel_file:
cmd = ("{perl_exports} && "
"scalpel-export --somatic {bed_opts} --ref {ref_file} --db {db_file} "
"--min-alt-count-tumor 5 --min-phred-fisher 10 --min-vaf-tumor 0.1 "
"| bgzip -c > {tx_indel_file}")
do.run(cmd.format(**locals()), "Scalpel somatic indel filter", {})
scalpel_tmp_file = bgzip_and_index(scalpel_tmp_file, config)
scalpel_tmp_file_common = bgzip_and_index(os.path.join(tmp_path, "main/common.indel.vcf"), config)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
bcftools_cmd_chi2 = get_scalpel_bcftools_filter_expression("chi2", config)
bcftools_cmd_common = get_scalpel_bcftools_filter_expression("reject", config)
fix_ambig = vcfutils.fix_ambiguous_cl()
add_contig = vcfutils.add_contig_to_header_cl(dd.get_ref_file(items[0]), tx_out_file)
cl2 = ("vcfcat <({bcftools_cmd_chi2} {scalpel_tmp_file}) "
"<({bcftools_cmd_common} {scalpel_tmp_file_common}) | "
" {fix_ambig} | {vcfstreamsort} | {add_contig} {compress_cmd} > {tx_out_file}")
do.run(cl2.format(**locals()), "Finalising Scalpel variants", {})
return out_file
def get_scalpel_bcftools_filter_expression(filter_type, config):
bcftools = config_utils.get_program("bcftools", config)
filter_string = "{bcftools} filter -m '+' -O v --soft-filter "
if filter_type == "chi2":
filter_string += "'CHI2FILTER' -e 'INFO/CHI2 > 20.0' "
elif filter_type == "reject":
filter_string += "'REJECT' -e '%TYPE=\"indel\"' "
else:
return "zcat"
return filter_string.format(**locals())
```
#### File: bcbio/workflow/stormseq.py
```python
import json
import os
import yaml
from bcbio import utils
from bcbio.upload import s3
from bcbio.workflow import xprize
def parse_args(args):
parser = xprize.HelpArgParser(description="Run STORMSeq processing on AWS")
parser.add_argument("config_file", help="JSON configuration file with form parameters")
parser.add_argument("base_dir", help="Base directory to process in")
parser.add_argument("bcbio_config_file", help="bcbio system YAML config")
args = parser.parse_args(args)
return args
def _get_s3_files(local_dir, file_info, params):
"""Retrieve s3 files to local directory, handling STORMSeq inputs.
"""
assert len(file_info) == 1
    files = list(file_info.values())[0]
fnames = []
for k in ["1", "2"]:
if files[k] not in fnames:
fnames.append(files[k])
out = []
for fname in fnames:
bucket, key = fname.replace("s3://", "").split("/", 1)
if params["access_key_id"] == "TEST":
out.append(os.path.join(local_dir, os.path.basename(key)))
else:
out.append(s3.get_file(local_dir, bucket, key, params))
return out
def setup(args):
configdir = utils.safe_makedir(os.path.join(args.base_dir, "config"))
inputdir = utils.safe_makedir(os.path.join(args.base_dir, "inputs"))
workdir = utils.safe_makedir(os.path.join(args.base_dir, "work"))
finaldir = utils.safe_makedir(os.path.join(args.base_dir, "ready"))
out_config_file = os.path.join(configdir, "%s.yaml" %
os.path.splitext(os.path.basename(args.config_file))[0])
with open(args.config_file) as in_handle:
ss_config = json.load(in_handle)
ss_params = ss_config["parameters"]
out = {"fc_date": xprize.get_fc_date(out_config_file),
"fc_name": ss_config["sample"],
"upload": {"dir": finaldir,
"method": "s3",
"bucket": ss_params["s3_bucket"],
"access_key_id": ss_params["access_key_id"],
"secret_access_key": ss_params["secret_access_key"]},
"details": [{
"files": _get_s3_files(inputdir, ss_config["files"], ss_params),
"lane": 1,
"description": ss_params["sample"],
"analysis": "variant",
"genome_build": ss_params["genome_version"],
"algorithm": {
"aligner": ss_params["alignment_pipeline"],
"variantcaller": ss_params["calling_pipeline"],
"quality_format": "Standard",
"coverage_interval": "genome" if ss_params["data_type"] == "data_wgs" else "exome",
}}]}
with open(out_config_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return workdir, {"config_file": args.bcbio_config_file,
"run_info_yaml": out_config_file}
```
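The shape of the STORMSeq form JSON consumed by `setup()` is only implied by its dictionary lookups; below is a hedged reconstruction with placeholder values (bucket names, sample IDs and pipeline choices are illustrative, not taken from the source).

```python
# Hedged example of the STORMSeq JSON config that setup() and _get_s3_files()
# expect; every value is a placeholder inferred from the key lookups above.
example_config = {
    "sample": "NA12878-demo",
    "files": {
        "run1": {
            "1": "s3://my-input-bucket/reads_1.fastq.gz",
            "2": "s3://my-input-bucket/reads_2.fastq.gz",
        }
    },
    "parameters": {
        "sample": "NA12878-demo",
        "s3_bucket": "my-results-bucket",
        "access_key_id": "TEST",     # "TEST" makes _get_s3_files skip the S3 download
        "secret_access_key": "TEST",
        "genome_version": "GRCh37",
        "alignment_pipeline": "bwa",
        "calling_pipeline": "gatk",
        "data_type": "data_wgs",     # anything else yields coverage_interval "exome"
    },
}
```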
#### File: bcbio-nextgen/scripts/bcbio_prepare_samples.py
```python
import os
import sys
import yaml
from collections import defaultdict
from argparse import ArgumentParser
from bcbio import log
from bcbio.log import logger
from bcbio.install import _get_data_dir
from bcbio import utils
from bcbio.bam import is_bam
from bcbio.pipeline.sra import is_gsm, is_srr
from bcbio.bam.fastq import is_fastq, combine_pairs
from bcbio.distributed.transaction import file_transaction
from bcbio.distributed import clargs, resources, prun
from bcbio.provenance import system, profile
def create_new_csv(samples, args):
"""create csv file that can be use with bcbio -w template"""
out_fn = os.path.splitext(args.csv)[0] + "-merged.csv"
logger.info("Preparing new csv: %s" % out_fn)
with file_transaction(out_fn) as tx_out:
with open(tx_out, 'w') as handle:
handle.write(_header(args.csv))
for s in samples:
sample_name = s['name'] if isinstance(s['out_file'], list) else os.path.basename(s['out_file'])
handle.write("%s,%s,%s\n" % (sample_name, s['name'], ",".join(s['anno'])))
def _header(fn):
"""read header of csv file"""
    with open(fn) as in_handle:
        return in_handle.readline()
def _get_samples_to_process(fn, out_dir, config, force_single, separators):
"""parse csv file with one line per file. It will merge
all files that have the same description name"""
out_dir = os.path.abspath(out_dir)
samples = defaultdict(list)
with open(fn) as handle:
for l in handle:
if l.find("description") > 0:
logger.info("Skipping header.")
continue
cols = l.strip().split(",")
if len(cols) > 0:
if len(cols) < 2:
raise ValueError("Line needs 2 values: file and name.")
if utils.file_exists(cols[0]) or is_gsm(cols[0]) or is_srr(cols[0]):
if cols[0].find(" ") > -1:
new_name = os.path.abspath(cols[0].replace(" ", "_"))
logger.warning("Space finds in %s. Linked to %s." % (cols[0], new_name))
logger.warning("Please, avoid names with spaces in the future.")
utils.symlink_plus(os.path.abspath(cols[0]), new_name)
cols[0] = new_name
samples[cols[1]].append(cols)
else:
logger.info("skipping %s, File doesn't exist." % cols[0])
for sample, items in samples.items():
if is_fastq(items[0][0], True):
fn = "fq_merge"
ext = ".fastq.gz"
elif is_bam(items[0][0]):
fn = "bam_merge"
ext = ".bam"
elif is_gsm(items[0][0]):
fn = "query_gsm"
ext = ".fastq.gz"
elif is_srr(items[0][0]):
fn = "query_gsm"
ext = ".fastq.gz"
files = [os.path.abspath(fn_file[0]) if utils.file_exists(fn_file[0]) else fn_file[0] for fn_file in items]
samples[sample] = [{'files': _check_paired(files, force_single, separators),
'out_file': os.path.join(out_dir, sample + ext),
'fn': fn, 'anno': items[0][2:], 'config': config,
'name': sample, 'out_dir': out_dir}]
return [samples[sample] for sample in samples]
def _check_stems(files):
"""check if stem names are the same and use full path then"""
used = set()
for fn in files:
if os.path.basename(fn) in used:
logger.warning("%s stem is multiple times in your file list, "
"so we don't know "
"how to assign it to the sample data in the CSV. "
"We are gonna use full path to make a difference, "
"that means paired files should be in the same folder. "
"If this is a problem, you should rename the files you want "
"to merge. Sorry, no possible magic here." % os.path.basename(fn)
)
return True
used.add(os.path.basename(fn))
return False
def _check_paired(files, force_single, separators):
"""check if files are fastq(.gz) and paired"""
full_name = _check_stems(files)
if files[0].endswith(".bam"):
return files
elif is_gsm(files[0]):
return files
return combine_pairs(files, force_single, full_name, separators)
def get_cluster_view(p):
"""get ipython running"""
from cluster_helper import cluster as ipc
return ipc.cluster_view(p['scheduler'], p['queue'], p['num_jobs'], p['cores_per_job'], start_wait=p['timeout'], extra_params={"resources": p['resources'], "mem": p['mem'], "tag": p['tag'], "run_local": False})
def wait_until_complete(jobs):
"""wait jobs finish"""
return [j.get() for j in jobs]
if __name__ == "__main__":
description = ("Merge multiple files from the same sample to be compatible with bcbio BAM/FASTQ input files")
    parser = ArgumentParser(description=description)
parser.add_argument("--csv", required=True, help="csv file with metadata")
parser.add_argument("--out", required=True, help="output dir")
parser.add_argument("--force-single", action='store_true', default=False, help="Treat all files as single reads")
parser.add_argument("--separators", nargs="*",
default=["R", "_", "-", "."],
help="Space separated list of separators that indicates paired files.")
parser.add_argument("--remove-source", action='store_true', default=False,
help="Remove original files.")
parser.add_argument("-n", "--numcores", type=int,
default=1, help="Number of concurrent jobs to process.")
parser.add_argument("-c", "--cores-per-job", type=int,
default=1, help="Number of cores to use.")
parser.add_argument("-m", "--memory-per-job", default=2, help="Memory in GB to reserve per job.")
parser.add_argument("--timeout", default=15, help="Time to wait before giving up starting.")
parser.add_argument("--retries", default=0, type=int,
help=("Number of retries of failed tasks during "
"distributed processing. Default 0 "
"(no retries)"))
parser.add_argument("-s", "--scheduler", help="Type of scheduler to use.",
choices=["lsf", "slurm", "torque", "sge", "pbspro"])
parser.add_argument("-r", "--resources", help="Extra scheduler resource flags.", default=[], action="append")
parser.add_argument("-q", "--queue", help="Queue to submit jobs to.")
parser.add_argument("-p", "--tag", help="Tag name to label jobs on the cluster", default="bcb-prep")
parser.add_argument("-t", "--paralleltype",
choices=["local", "ipython"],
default="local", help="Run with iptyhon")
args = parser.parse_args()
out_dir = os.path.abspath(args.out)
utils.safe_makedir(out_dir)
try:
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
except ValueError as err:
print(err)
print("WARNING: Attempting to read bcbio_system.yaml in the current directory.")
system_config = "bcbio_system.yaml"
if utils.file_exists(system_config):
with open(system_config) as in_handle:
config = yaml.safe_load(in_handle)
else:
print("WARNING: bcbio_system.yaml not found, creating own resources.")
config = {'resources': {}}
res = {'cores': args.cores_per_job, 'memory': f"{args.memory_per_job}g"}
config["algorithm"] = {"num_cores": args.cores_per_job}
config["resources"].update({'sambamba': res,
'samtools': res})
config["log_dir"] = os.path.join(os.path.abspath(os.getcwd()), "log")
parallel = clargs.to_parallel(args)
parallel.update({'progs': ['samtools', 'sambamba']})
parallel = log.create_base_logger(config, parallel)
log.setup_local_logging(config, parallel)
dirs = {'work': os.path.abspath(os.getcwd())}
system.write_info(dirs, parallel, config)
sysinfo = system.machine_info()[0]
config["remove_source"] = args.remove_source
samples = _get_samples_to_process(args.csv, out_dir, config, args.force_single, args.separators)
if not samples:
print("No samples found.")
sys.exit(0)
parallel = resources.calculate(parallel, [samples], sysinfo, config)
with prun.start(parallel, samples, config, dirs) as run_parallel:
with profile.report("prepare bcbio samples", dirs):
samples = run_parallel("prepare_bcbio_samples", samples)
create_new_csv(samples, args)
```
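The layout of the `--csv` input is only described implicitly by `_get_samples_to_process()`; the sketch below is a hedged example (paths, sample names and annotation columns are placeholders). Rows sharing the same second column are merged into one sample, and any extra columns are carried through to the merged CSV.

```python
# Hedged example of the metadata CSV expected by this script; all paths and
# names are placeholders. The header line must contain "description".
example_csv = """\
samplename,description,batch,phenotype
/data/run1/sampleA_R1.fastq.gz,sampleA,b1,tumor
/data/run1/sampleA_R2.fastq.gz,sampleA,b1,tumor
/data/run1/sampleB.bam,sampleB,b1,normal
"""
with open("prepare_samples.csv", "w") as out_handle:
    out_handle.write(example_csv)
```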
#### File: scripts/utils/build_compare_vcf.py
```python
import os
import sys
import random
import vcf
def main(in_file):
    out_file = "{0}-cmp{1}".format(*os.path.splitext(in_file))
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
rdr = vcf.Reader(in_handle)
wtr = vcf.Writer(out_handle, rdr)
for rec in rdr:
out_rec = adjust_variant(rec)
if out_rec:
wtr.write_record(out_rec)
def adjust_variant(rec):
do_change = random.random()
if do_change < 0.2:
return None
elif do_change < 0.5:
return rec
else:
rec.samples = [adjust_genotype(g) for g in rec.samples]
return rec
def adjust_genotype(g):
alts = ["0", "1"]
do_change = random.random()
if do_change < 0.7:
new_gt = None
elif do_change < 0.9:
new_gt = g.gt_phase_char().join(["."] * (len(g.gt_alleles)))
else:
new_gt = g.gt_phase_char().join([random.choice(alts) for x in g.gt_alleles])
if new_gt:
g.data = g.data._replace(GT=new_gt)
return g
if __name__ == "__main__":
main(sys.argv[1])
```
#### File: scripts/utils/hla_loh_comparison.py
```python
from __future__ import print_function
import collections
import csv
import glob
import io
import os
import shutil
import subprocess
import sys
import yaml
from bcbio.pipeline import alignment
from bcbio import utils
HLA_GLOB = "call-call_hla/shard-*/execution"
SV_GLOB = "call-svcall/shard-*/wf-svcall.cwl/*/call-detect_sv/execution"
SVCALL_GLOB = "structural/{sample}/{method}/*{ext}"
LOHHLA = "../lohhla/lohhla"
ALIGNER = "novoalign"
DEPTH_FILTER = 5
# hg38 coordinates for HLA region https://www.ncbi.nlm.nih.gov/grc/human/regions/MHC
hla_coords = ("chr6", 28510120, 33480577)
def run_sample(tumor, normal, work_dir, cromwell_dir, hla_fa):
hla_fasta, hlas = prep_hla_ref(hla_fa, work_dir)
hla_cromwell_dir = _get_cromwell_execution_dir(cromwell_dir, HLA_GLOB)
sv_cromwell_dir = _get_cromwell_execution_dir(cromwell_dir, SV_GLOB)
tumor_fastq, tumor_calls_orig = get_hla(tumor, hla_cromwell_dir, HLA_GLOB)
tumor_bam = alignment.align_to_sort_bam(tumor_fastq, None, ALIGNER, get_data(tumor, hla_fasta, work_dir))["work_bam"]
normal_fastq, normal_calls_orig = get_hla(normal, hla_cromwell_dir, HLA_GLOB)
normal_bam = alignment.align_to_sort_bam(normal_fastq, None, ALIGNER, get_data(normal, hla_fasta, work_dir))["work_bam"]
tumor_ploidy = prep_ploidy(work_dir, tumor, tumor_bam, sv_cromwell_dir, os.path.join(SV_GLOB, SVCALL_GLOB))
tumor_calls = prep_hla(work_dir, tumor, normal_calls_orig, hlas, normal_bam, tumor_bam)
normal_calls = prep_hla(work_dir, normal, normal_calls_orig, hlas, normal_bam, tumor_bam)
bam_dir, normal_bam_ready = create_tumor_bamdir(tumor, tumor_bam, normal_bam, work_dir)
out_dir = utils.safe_makedir(os.path.join(work_dir, tumor, "lohhla_out"))
prep_bam_inputs(out_dir, tumor, tumor_calls, tumor_bam)
prep_bam_inputs(out_dir, normal, normal_calls, normal_bam)
lohhla_output = os.path.join(out_dir, "%s.%s.DNA.HLAlossPrediction_CI.xls" % (tumor, DEPTH_FILTER))
cmd = ["Rscript", os.path.join(LOHHLA, "LOHHLAscript.R"),
"--patientId", tumor, "--outputDir", out_dir,
"--normalBAMfile", normal_bam_ready, "--BAMDir", bam_dir,
"--hlaPath", normal_calls, "--HLAfastaLoc", hla_fasta,
"--HLAexonLoc", os.path.join(LOHHLA, "data", "hla.dat"),
"--CopyNumLoc", tumor_ploidy,
"--mappingStep", "FALSE",
"--minCoverageFilter", str(DEPTH_FILTER)]
if not os.path.exists(lohhla_output):
subprocess.check_call(cmd)
compare_calls(tumor, lohhla_output, sv_cromwell_dir, os.path.join(SV_GLOB, SVCALL_GLOB))
def _create_plot(tumor, in_glob, out_ext, page=1):
"""Create an output plot for the given PDF in the images directory.
"""
out_dir = utils.safe_makedir("images")
out_name = os.path.join(out_dir, "%s-%s" % (tumor, out_ext))
in_file = glob.glob(in_glob)[0]
cmd = ["pdftoppm", in_file, out_name, "-png", "-f", page, "-singlefile"]
if not os.path.exists(out_name + ".png"):
subprocess.check_call([str(x) for x in cmd])
return out_name + ".png"
def _get_loh_from_calls(calls):
if calls["loh"]:
return "mixed LOH" if calls["std"] else "LOH"
else:
return "no LOH"
def _compare_lohhla(lohhla_output):
print("#### LOHHLA")
print("```")
seen = set([])
calls = collections.defaultdict(int)
with open(lohhla_output) as in_handle:
header = in_handle.readline().strip().split("\t")
for c in in_handle:
vals = dict(zip(header, c.strip().split("\t")))
key = (vals["HLA_A_type1"], vals["HLA_A_type2"])
if key not in seen:
print([vals[x] for x in ["PVal_unique", "HLA_A_type1", "HLA_A_type2", "HLA_type1copyNum_withBAFBin", "HLA_type2copyNum_withBAFBin"]])
seen.add(key)
if float(vals["PVal_unique"]) < 0.01:
calls["loh"] += 1
else:
calls["std"] += 1
print("```")
return _get_loh_from_calls(calls)
def _compare_purecn(tumor, cromwell_dir, sv_glob):
print("#### PureCN")
calls = collections.defaultdict(int)
pure_base_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="purecn", ext="-purecn.csv"))
pure_cn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="purecn", ext="loh.csv"))
cov_plot = _create_plot(tumor, os.path.join(os.path.dirname(pure_cn_file), "%s*-purecn.pdf" % tumor), "purecn", 2)
sun_plot = _create_plot(tumor, os.path.join(os.path.dirname(pure_cn_file), "%s*-purecn_local_optima.pdf" % tumor),
"purecn-sunrise")
with open(pure_base_file) as in_handle:
vals = dict(zip(in_handle.readline().strip().replace('"', '').split(","),
in_handle.readline().strip().split(",")))
print()
print("| | |")
print("| --- | --- |")
print("| purity | %s |" % vals["Purity"])
print("| ploidy | %s |" % vals["Ploidy"])
print("```")
with open(pure_cn_file) as in_handle:
in_handle.readline() # header
for line in in_handle:
_, chrom, start, end, _, cn, minor_cn = line.split(",")[:7]
start = int(start)
end = int(end)
if chrom == hla_coords[0] and are_overlapping((start, end), hla_coords[1:]):
print(line.strip().split(",")[1:])
if int(minor_cn) == 0:
calls["loh"] += 1
else:
calls["std"] += 1
print("```")
print("" % (tumor, cov_plot))
print("" % (tumor, sun_plot))
return _get_loh_from_calls(calls)
def _compare_titancna(tumor, cromwell_dir, sv_glob):
print("#### TitanCNA")
calls = collections.defaultdict(int)
titancna_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="titancna", ext="Clusters.txt"))
with open(titancna_file) as in_handle:
vals = dict(zip(in_handle.readline().strip().split("\t"), in_handle.readline().strip().split("\t")))
path = vals["path"]
init_dir, check_dir = os.path.dirname(titancna_file).split("/", 1)
rel_path = init_dir + "/" + path[path.find(check_dir):]
print()
print("| | |")
print("| --- | --- |")
print("| purity | %s |" % vals["purity"])
print("| ploidy | %s |" % vals["ploidy"])
cna_plot = _create_plot(tumor, os.path.join(rel_path, "%s*_CNA.pdf" % tumor), "titan-cna")
loh_plot = _create_plot(tumor, os.path.join(rel_path, "%s*_LOH.pdf" % tumor), "titan-loh")
seg_file = rel_path + ".segs.txt"
out_keys = ["Chromosome", "Start_Position.bp.", "End_Position.bp.", "Copy_Number",
"MinorCN", "MajorCN", "TITAN_call"]
print("```")
with open(seg_file) as in_handle:
header = in_handle.readline().strip().split()
for line in in_handle:
val = dict(zip(header, line.strip().split()))
start = int(val["Start_Position.bp."])
end = int(val["End_Position.bp."])
if val["Chromosome"] == hla_coords[0] and are_overlapping((start, end), hla_coords[1:]):
print([val[k] for k in out_keys])
if int(val["MinorCN"]) == 0:
calls["loh"] += 1
else:
calls["std"] += 1
print("```")
print("" % (tumor, cna_plot))
print("" % (tumor, loh_plot))
return _get_loh_from_calls(calls)
def _compare_gatkcnv(tumor, cromwell_dir, sv_glob):
print("#### GATK CNV")
gatk_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="gatk-cnv", ext="-call.seg"))
orig_model_file = gatk_file.replace("-call.seg", ".modeled.png")
model_file = os.path.join("images", os.path.basename(orig_model_file))
shutil.copy(orig_model_file, model_file)
print("```")
with open(gatk_file) as in_handle:
for line in in_handle:
if not line.startswith("@"):
chrom, start, end = line.split()[:3]
if chrom == hla_coords[0] and are_overlapping((int(start), int(end)), hla_coords[1:]):
print(line.strip())
print("```")
print("" % (tumor, model_file))
def compare_calls(tumor, lohhla_output, cromwell_dir, sv_glob):
summary = collections.OrderedDict()
print("### %s" % tumor)
orig_stdout = sys.stdout
sys.stdout = io.StringIO()
summary["LOHHLA"] = _compare_lohhla(lohhla_output)
summary["PureCN"] = _compare_purecn(tumor, cromwell_dir, sv_glob)
summary["TitanCNA"] = _compare_titancna(tumor, cromwell_dir, sv_glob)
saved_stdout = sys.stdout
sys.stdout = orig_stdout
print()
print("| | |")
print("| --- | --- |")
for k, v in summary.items():
print("| %s | %s |" % (k, v))
sys.stdout.write(saved_stdout.getvalue())
# print("#### CNVkit")
# cnvkit_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=tumor, method="cnvkit", ext="-call.cns"))
# out_keys = ["chromosome", "start", "end", "cn", "cn1", "cn2"]
# print("```")
# with open(cnvkit_file) as in_handle:
# header = in_handle.readline().strip().split()
# for line in in_handle:
# chrom, start, end = line.split()[:3]
# if chrom == hla_coords[0] and are_overlapping((int(start), int(end)), hla_coords[1:]):
# vals = dict(zip(header, line.strip().split()))
# print([vals[k] for k in out_keys])
# print("```")
    print()
def are_overlapping(r, s):
"""Test if two coordinates overlap.
https://stackoverflow.com/a/27182551
"""
return r[1] >= s[0] and s[1] >= r[0]
def _get_cromwell_file(cromwell_dir, file_glob, kwargs):
fglob = os.path.join(cromwell_dir, file_glob.format(**kwargs))
fs = glob.glob(fglob)
assert len(fs) == 1, (fglob, fs)
return fs[0]
def _get_cromwell_execution_dir(base_dir, target_glob):
"""Retrieve the baseline directory with cromwell output files.
Handles Cromwell restarts where there are multiple work directories and
we traverse symlinks back to the original.
"""
cur_dir = glob.glob(os.path.join(base_dir, target_glob))[0]
if os.path.exists(os.path.join(cur_dir, "cwl.output.json")):
return base_dir
else:
symlink_dir = os.path.dirname(os.path.realpath(os.path.join(cur_dir, "script")))
ref_base = os.path.dirname(base_dir)
new_guid = symlink_dir[symlink_dir.find(ref_base) + len(ref_base) + 1:].split("/")[0]
return _get_cromwell_execution_dir(os.path.join(ref_base, new_guid), target_glob)
def prep_bam_inputs(out_dir, sample, call_file, bam_file):
"""Prepare expected input BAM files from pre-aligned.
"""
base = utils.splitext_plus(os.path.basename(bam_file))[0]
with open(call_file) as in_handle:
for cur_hla in (x.strip() for x in in_handle):
out_file = os.path.join(utils.safe_makedir(os.path.join(out_dir, base)),
"%s.type.%s.filtered.bam" % (base, cur_hla))
if not os.path.exists(out_file):
cmd = ["samtools", "view", "-b","-o", out_file, bam_file, cur_hla]
subprocess.check_call(cmd)
def create_tumor_bamdir(tumor, tumor_bam, normal_bam, work_dir):
"""Create expected input directory with tumor/normal BAMs in one place.
"""
bam_dir = utils.safe_makedir(os.path.join(work_dir, tumor, "in_bams"))
normal_bam_ready = os.path.join(bam_dir, os.path.basename(normal_bam))
utils.symlink_plus(normal_bam, normal_bam_ready)
tumor_bam_ready = os.path.join(bam_dir, os.path.basename(tumor_bam))
utils.symlink_plus(tumor_bam, tumor_bam_ready)
return bam_dir, normal_bam_ready
def get_data(sample, hla_fasta, work_dir):
return {"dirs": {"work": work_dir},
"config": {"analysis": "variant", "algorithm": {"multiple_mappers": "All 9999"},
"resources": {"novoalign": {"options": ["-R", "0"]}}},
"reference": {"bwa": {"indexes": hla_fasta + ".bwt"},
"novoalign": {"indexes": [hla_fasta + ".ndx"]}},
"rgnames": {"sample": sample, "rg": sample, "pl": "illumina", "pu": sample, "lane": sample}}
def get_hla(sample, cromwell_dir, hla_glob):
"""Retrieve HLA calls and input fastqs for a sample.
"""
hla_dir = glob.glob(os.path.join(cromwell_dir, hla_glob, "align", sample, "hla"))[0]
fastq = os.path.join(hla_dir, "OptiType-HLA-A_B_C-input.fq")
calls = os.path.join(hla_dir, "%s-optitype.csv" % sample)
return fastq, calls
def name_to_absolute(x):
"""Convert standard hg38 HLA name into ABSOLUTE naming.
"""
for c in ["-", "*", ":"]:
x = x.replace(c, "_")
x = x.lower()
return x
def get_hla_choice(h, hlas, normal_bam, tumor_bam):
"""Retrieve matching HLA with best read support in both tumor and normal
"""
def get_counts(bam_file):
counts = {}
        for line in subprocess.check_output(["samtools", "idxstats", bam_file]).decode().split("\n"):
if line.startswith(h):
name, _, count, _ = line.split()
counts[name] = int(count)
return counts
tcounts = get_counts(tumor_bam)
ncounts = get_counts(normal_bam)
check_hlas = [x for x in hlas if x.startswith(h) and tcounts.get(x, 0) > 0 and ncounts.get(x, 0) > 0]
cur_hlas = sorted(check_hlas, key=lambda x: ncounts[x], reverse=True)
#print(cur_hlas[0], tcounts.get(cur_hlas[0]), ncounts.get(cur_hlas[0]))
return cur_hlas[0]
def prep_hla(work_dir, sample, calls, hlas, normal_bam, tumor_bam):
"""Convert HLAs into ABSOLUTE format for use with LOHHLA.
LOHHLA hard codes names to hla_a, hla_b, hla_c so need to move
"""
work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
hla_file = os.path.join(work_dir, "%s-hlas.txt" % sample)
with open(calls) as in_handle:
with open(hla_file, "w") as out_handle:
next(in_handle) # header
for line in in_handle:
_, _, a, _, _ = line.strip().split(",")
a1, a2 = a.split(";")
out_handle.write(get_hla_choice(name_to_absolute(a1), hlas, normal_bam, tumor_bam) + "\n")
out_handle.write(get_hla_choice(name_to_absolute(a2), hlas, normal_bam, tumor_bam) + "\n")
return hla_file
def prep_ploidy(work_dir, sample, bam_file, cromwell_dir, sv_glob):
"""Create LOHHLA compatible input ploidy file from PureCN output.
"""
purecn_file = _get_cromwell_file(cromwell_dir, sv_glob, dict(sample=sample, method="purecn", ext="purecn.csv"))
work_dir = utils.safe_makedir(os.path.join(work_dir, sample, "inputs"))
out_file = os.path.join(work_dir, "%s-solutions.txt" % sample)
with open(purecn_file) as in_handle:
reader = csv.reader(in_handle)
purecn_stats = dict(zip(next(reader), next(reader)))
with open(out_file, "w") as out_handle:
out_handle.write("Ploidy\ttumorPurity\ttumorPloidy\n")
lohhla_name = utils.splitext_plus(os.path.basename(bam_file))[0]
out_handle.write("%s\t%s\t%s\t%s\n" % (lohhla_name, purecn_stats["Ploidy"],
purecn_stats["Purity"], purecn_stats["Ploidy"]))
return out_file
def prep_hla_ref(hla_fasta, work_dir):
work_dir = utils.safe_makedir(os.path.join(work_dir, "hlaref"))
out_file = os.path.join(work_dir, os.path.basename(hla_fasta))
seen_names = set([])
if not utils.file_uptodate(out_file, hla_fasta):
with open(hla_fasta) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith(">"):
cur_name = name_to_absolute(line.strip().split()[1])
if cur_name not in seen_names:
out_handle.write(">%s\n" % cur_name)
seen_names.add(cur_name)
write_seq = True
else:
write_seq = False
elif write_seq:
out_handle.write(line)
if not os.path.exists(out_file + ".bwt"):
subprocess.check_call(["bwa", "index", out_file])
if not os.path.exists(out_file + ".ndx"):
subprocess.check_call(["novoindex", out_file + ".ndx", out_file])
hlas = []
with open(out_file) as in_handle:
for line in in_handle:
if line.startswith(">"):
hlas.append(line[1:].strip())
return out_file, hlas
def samples_from_config(sample_yaml):
with open(sample_yaml) as in_handle:
config = yaml.safe_load(in_handle)
by_batch = collections.defaultdict(dict)
for s in config["details"]:
by_batch[s["metadata"]["batch"]][s["metadata"]["phenotype"]] = s["description"]
for bid in sorted(by_batch.keys()):
yield by_batch[bid]["tumor"], by_batch[bid]["normal"]
if __name__ == "__main__":
sample_config, hla_fa, cromwell_dir = sys.argv[1:]
work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "work_lohhla"))
for t, n in sorted(samples_from_config(sample_config)):
run_sample(t, n, work_dir, cromwell_dir, hla_fa)
```
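The bcbio sample YAML read by `samples_from_config()` only needs a description plus batch/phenotype metadata per sample; here is a hedged, minimal example (sample and batch names are placeholders).

```python
# Hedged sketch of the sample YAML consumed by samples_from_config(); each
# batch must contain one "tumor" and one "normal" entry. Names are made up.
import yaml

example = {
    "details": [
        {"description": "patient1-t",
         "metadata": {"batch": "patient1", "phenotype": "tumor"}},
        {"description": "patient1-n",
         "metadata": {"batch": "patient1", "phenotype": "normal"}},
    ]
}
with open("samples.yaml", "w") as out_handle:
    yaml.safe_dump(example, out_handle, default_flow_style=False)
```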
#### File: tests/integration/test_pipeline.py
```python
import os
import sys
import shutil
import subprocess
from bcbio import utils
from bcbio.bam import fastq
from bcbio.distributed import prun
from bcbio.pipeline.config_utils import load_config
from tests.conftest import make_workdir, get_post_process_yaml
import pytest
class TestRunInfo(object):
@pytest.mark.speed1
def test_programs(self, data_dir):
"""Identify programs and versions used in analysis.
"""
from bcbio.provenance import programs
with make_workdir() as workdir:
config = load_config(get_post_process_yaml(data_dir, workdir))
print(programs._get_versions(config))
class TestVCFUtil(object):
"""Test various utilities for dealing with VCF files.
"""
@property
def data_dir(self):
return os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"data"
)
@property
def automated_dir(self):
return os.path.join(self.data_dir, "automated")
@property
def var_dir(self):
return os.path.join(self.data_dir, "variants")
@property
def combo_file(self):
return os.path.join(self.var_dir, "S1_S2-combined.vcf.gz")
@property
def ref_file(self):
return os.path.join(self.data_dir, "genomes", "hg19", "seq", "hg19.fa")
@pytest.mark.speed1
@pytest.mark.combo
def test_1_parallel_vcf_combine(self):
"""Parallel combination of VCF files, split by chromosome.
"""
from bcbio.variation import vcfutils
files = [
os.path.join(self.var_dir, "S1-variants.vcf"),
os.path.join(self.var_dir, "S2-variants.vcf")
]
with make_workdir() as workdir:
config = load_config(
get_post_process_yaml(self.automated_dir, workdir))
config["algorithm"] = {}
region_dir = os.path.join(self.var_dir, "S1_S2-combined-regions")
if os.path.exists(region_dir):
shutil.rmtree(region_dir)
if os.path.exists(self.combo_file):
os.remove(self.combo_file)
reqs = {"type": "local", "cores": 1}
with prun.start(reqs, [[config]], config) as run_parallel:
vcfutils.parallel_combine_variants(
files, self.combo_file, self.ref_file, config, run_parallel)
for fname in files:
if os.path.exists(fname + ".gz"):
subprocess.check_call(["gunzip", fname + ".gz"])
if os.path.exists(fname + ".gz.tbi"):
os.remove(fname + ".gz.tbi")
@pytest.mark.speed1
@pytest.mark.combo
def test_2_vcf_exclusion(self):
"""Exclude samples from VCF files.
"""
from bcbio.variation import vcfutils
with make_workdir() as workdir:
config = load_config(
get_post_process_yaml(self.automated_dir, workdir))
config["algorithm"] = {}
out_file = utils.append_stem(self.combo_file, "-exclude")
to_exclude = ["S1"]
if os.path.exists(out_file):
os.remove(out_file)
vcfutils.exclude_samples(
self.combo_file, out_file, to_exclude, self.ref_file, config)
@pytest.mark.speed1
@pytest.mark.combo
def test_3_vcf_split_combine(self):
"""Split a VCF file into SNPs and indels, then combine back together.
"""
from bcbio.variation import vcfutils
with make_workdir() as workdir:
config = load_config(get_post_process_yaml(
self.automated_dir, workdir))
config["algorithm"] = {}
fname = os.path.join(self.var_dir, "S1-variants.vcf")
snp_file, indel_file = vcfutils.split_snps_indels(
fname, self.ref_file, config)
merge_file = "%s-merge%s.gz" % os.path.splitext(fname)
vcfutils.combine_variant_files(
[snp_file, indel_file], merge_file, self.ref_file, config)
for f in [snp_file, indel_file, merge_file]:
self._remove_vcf(f)
def _remove_vcf(self, f):
for ext in ["", ".gz", ".gz.tbi", ".tbi"]:
if os.path.exists(f + ext):
os.remove(f + ext)
@pytest.mark.speed1
@pytest.mark.combo
def test_4_vcf_sample_select(self, install_test_files, data_dir):
"""Select a sample from a VCF file.
"""
from bcbio.variation import vcfutils
fname = os.path.join(self.var_dir, "S1_S2-combined.vcf.gz")
out_file = "%s-sampleselect%s" % utils.splitext_plus(fname)
out_file = vcfutils.select_sample(fname, "S2", out_file, {})
self._remove_vcf(out_file)
@pytest.mark.speed1
@pytest.mark.combo
def test_5_find_fastq_pairs(self, install_test_files, data_dir):
"""Ensure we can correctly find paired fastq files.
"""
test_pairs = ["/path/to/input/D1HJVACXX_2_AAGAGATC_1.fastq",
"/path/to/input/D1HJVACXX_5_AAGAGATC_1.fastq",
"/path/2/input/D1HJVACXX_2_AAGAGATC_2.fastq",
"/path/2/input/D1HJVACXX_5_AAGAGATC_2.fastq"]
out = fastq.combine_pairs(test_pairs)
assert out[0] == ["/path/to/input/D1HJVACXX_2_AAGAGATC_1.fastq",
"/path/2/input/D1HJVACXX_2_AAGAGATC_2.fastq"], out[0]
assert out[1] == ["/path/to/input/D1HJVACXX_5_AAGAGATC_1.fastq",
"/path/2/input/D1HJVACXX_5_AAGAGATC_2.fastq"], out[1]
test_pairs = ["/path/to/input/Tester_1_fastq.txt",
"/path/to/input/Tester_2_fastq.txt"]
out = fastq.combine_pairs(test_pairs)
assert out[0] == test_pairs, out[0]
```
#### File: unit/pipeline/test_datadict.py
```python
from bcbio.pipeline import datadict as dd
def test_get_fusion_caller():
data = {
'config': {
'algorithm': {
'fusion_caller': 'FUSION_CALLER',
},
},
}
result = dd.get_fusion_caller(data)
assert result == 'FUSION_CALLER'
``` |
{
"source": "a114m/dihlab",
"score": 3
} |
#### File: dihlab/desserts/serializers.py
```python
from django.contrib.auth import get_user_model
from desserts.models import Dessert, Order, OrderDessert, Wishlist, CartItem
from rest_framework import serializers, exceptions
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name')
class DessertSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Dessert
fields = ('id', 'url', 'name', 'price', 'description', 'image', 'calories',)
class CartSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = CartItem
fields = ('quantity', 'dessert')
dessert = DessertSerializer()
class CartItemSerializer(serializers.Serializer):
dessert_id = serializers.IntegerField(required=True, write_only=True)
quantity = serializers.IntegerField(default=1)
dessert = DessertSerializer(read_only=True)
class CartItemsSerializer(serializers.Serializer):
desserts = serializers.ListField(child=CartItemSerializer())
def create(self, validated_data):
"""Add desserts to cart."""
# Get current user
user = self.context['request'].user
        # Get current cart items
current_cart = CartItem.objects.filter(owner=user)
        current_cart_items_ids = [item.dessert_id for item in current_cart]
# List to prepare items in-memory for bulk save
cart_items = list()
for dessert_data in validated_data['desserts']:
dessert_id = dessert_data['dessert_id']
            # Raise 400 if the dessert is already in the cart
            if dessert_id in current_cart_items_ids:
                raise exceptions.ParseError(detail="Dessert (%s) already exists in the cart" % dessert_id)
            try:  # Check that the dessert_id added to the cart is a valid dessert, else return 400
                dessert = Dessert.objects.get(id=dessert_id)  # FIXME: Bulk query instead of looping
            except Dessert.DoesNotExist:
                raise exceptions.ParseError(detail="No such Dessert ID (%s)" % dessert_id)
# Create CartItem in memory and add to cart_items list
quantity = dessert_data['quantity']
cart_items.append(
CartItem(owner=user, dessert=dessert, quantity=quantity)
)
# Bulk save cart_items to DB
result = CartItem.objects.bulk_create(cart_items)
        # Merge recently added items with the current ones in the cart to return
# response with all items in cart
response = {'desserts': list(current_cart) + result}
return response
class OrderDessertSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = OrderDessert
fields = ('quantity', 'dessert')
dessert = DessertSerializer(many=False, read_only=True)
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = ('id', 'created_at', 'url', 'orderdessert_set',)
orderdessert_set = OrderDessertSerializer(many=True, read_only=True)
class WishlistSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Wishlist
fields = ('id', 'url', 'name', 'owner', 'desserts', 'shared_with',)
``` |
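A hedged sketch of driving `CartItemsSerializer` outside a view; it assumes a configured Django project, an existing user, and desserts with the IDs shown (none of which comes from the source).

```python
# Minimal sketch, assuming Django is configured and the referenced user and
# dessert IDs exist; the request wiring mimics what a DRF view would provide.
from django.contrib.auth import get_user_model
from rest_framework.test import APIRequestFactory

from desserts.serializers import CartItemsSerializer

request = APIRequestFactory().post("/cart/")
request.user = get_user_model().objects.first()  # assumption: at least one user exists

payload = {"desserts": [{"dessert_id": 1, "quantity": 2},  # quantity defaults to 1
                        {"dessert_id": 3}]}
serializer = CartItemsSerializer(data=payload, context={"request": request})
serializer.is_valid(raise_exception=True)
cart = serializer.save()  # calls create(), bulk-inserting CartItem rows
```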
{
"source": "a114m/zyda",
"score": 3
} |
#### File: archivist/zyda/mongo_client.py
```python
from os import getenv
import pymongo
import logging
import traceback
logger = logging.getLogger(__name__)
class MongoDBClient(object):
def __init__(self):
self.host_name = getenv('MONGO_HOST', 'localhost')
self.port = getenv('MONGO_PORT', 27017)
self.db_name = getenv('MONGO_DB', 'zyda')
self.collection_name = getenv('MONGO_COLLECTION', 'restaurants')
self.client = pymongo.MongoClient(self.host_name, self.port)
self.collection = self.client[self.db_name][self.collection_name]
def get_collection(self):
return self.collection
def add_or_update(self, item):
try:
res = self.collection.replace_one({'id': item['id']}, item, upsert=True)
return True if res.upserted_id else False
except Exception as e:
logger.error(
"Error adding item to mongo collection '%s': %s\n%s" %
(self.collection.name, traceback.format_exc(), getattr(e, 'details', e))
)
return False
```
#### File: archivist/zyda/rabbitmq_client.py
```python
from time import sleep
from os import getenv
import pika
import logging
logger = logging.getLogger(__name__)
class RabbitMQClient():
def __init__(self):
self.host_name = getenv('BROKER_HOST', 'localhost')
self.port = int(getenv('BROKER_PORT', 5672))
self.userid = getenv('BROKER_USERID', 'guest')
self.password = getenv('BROKER_PASSWORD', '<PASSWORD>')
self.queue = getenv('BROKER_QUEUE', 'scraped_items')
self.exchange = getenv('BROKER_EXCHANGE', '')
self.credentials = pika.PlainCredentials(self.userid, self.password)
self.connection_params = pika.ConnectionParameters(host=self.host_name, port=self.port, credentials=self.credentials)
self._open_connection()
def _open_connection(self):
self.connection = pika.BlockingConnection(self.connection_params)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue)
def start_consuming(self, callback):
while True:
try:
self.channel.basic_consume(callback,
queue=self.queue,
no_ack=True)
self.channel.start_consuming()
except pika.exceptions.AMQPConnectionError as err:
logger.error("Error occurred while connecting to RabbitMQ: %s" % err)
logger.info("Trying to listen again in 3 seconds")
sleep(3)
self._open_connection()
``` |
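The two clients above are not shown wired together; the following is a hedged sketch of a consume-and-upsert loop, assuming the queue carries JSON documents with an `id` field and that the modules are importable as `zyda.mongo_client` / `zyda.rabbitmq_client` (both assumptions, not taken from the source).

```python
# Hedged sketch: consume scraped items and upsert them into MongoDB.
# The callback signature follows pika's BlockingChannel basic_consume
# convention used above; the JSON payload shape is an assumption.
import json
import logging

from zyda.mongo_client import MongoDBClient
from zyda.rabbitmq_client import RabbitMQClient

logging.basicConfig(level=logging.INFO)
mongo = MongoDBClient()
broker = RabbitMQClient()

def handle_message(channel, method, properties, body):
    # pika delivers the payload as bytes; decode and upsert by "id".
    item = json.loads(body)
    created = mongo.add_or_update(item)
    logging.info("%s item %s", "Inserted" if created else "Updated", item.get("id"))

broker.start_consuming(handle_message)
```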
{
"source": "a115/json-graph-repl",
"score": 3
} |
#### File: json-graph-repl/jgrepl/repl.py
```python
import os
import json
from collections import defaultdict
from cmd2 import Cmd
VERSION = '0.1.4'
class JSONRepl(Cmd):
def __init__(self, filepath):
self.allow_cli_args = False
Cmd.__init__(self, use_ipython=True)
self.filepath = filepath
self._load_graph()
self.intro = "JSON Graph REPL v.{}".format(VERSION)
self._last_item = None
def _load_graph(self):
''' Load the graph from a JSON file and pre-compute some helper
data structures for speedier access. '''
self.poutput("*** Loading graph from '{}'...".format(self.filepath))
with open(self.filepath) as json_file:
self.graph = json.load(json_file)['graph']
self._set_cwd('/')
self._nodes = {}
self._edges = {}
self._children = defaultdict(set)
self._parents = defaultdict(set)
for edge in self.graph['edges']:
self._children[edge['source'].upper()].add(edge['target'].upper())
self._parents[edge['target'].upper()].add(edge['source'].upper())
self._edges[(edge['source'].upper(), edge['target'].upper())] = edge
for node in self.graph['nodes']:
self._nodes[node['id'].upper()] = node
self.root_nodes = [n['id'].upper() for n in self.graph['nodes']
if not self._parents[n['id'].upper()]]
def _current_node_id(self):
return self.cwd.split('/')[-1]
def _children_for(self, node_id):
if node_id:
return self._children[node_id]
return self.root_nodes
def _current_children(self):
return self._children_for(self._current_node_id())
def _current_node(self):
return self._nodes[self._current_node_id()]
def _iter_paths_for(self, node_id, path=''):
path = os.path.join(node_id, path) if path else node_id
if not self._parents[node_id]:
yield os.path.join('/', path)
else:
for parent_id in self._parents[node_id]:
yield from self._iter_paths_for(parent_id, path=path)
def _set_cwd(self, cwd):
self.cwd = cwd
self.prompt = "{}> ".format(self.cwd)
def _parse_args(self, args_str):
args = []
opts = []
for arg in args_str.split(' '):
if arg.startswith('-'):
arg = arg.strip('--').strip('-')
opts.append(arg)
else:
args.append(arg)
return args, opts
def _match(self, node, attrs):
matches = {}
for attr, val in attrs.items():
if (attr in node) and ((not val) or (str(node[attr]) == val)):
matches[attr] = node[attr]
continue
data = node.get('metadata', {})
if (attr in data) and ((not val) or (str(data[attr]) == val)):
matches[attr] = data[attr]
continue
break
else:
return matches
################################
# Generic command definitions:
#
def do_pwd(self, _args):
''' Print the current path '''
self.poutput(self.cwd)
def do_cd(self, args):
''' Change to a new path '''
if args == '$':
args = self._last_item or '/'
if args.startswith('/'):
self._set_cwd('/')
            args = args.strip('/')
for component in args.upper().split('/'):
if not component:
continue
if component == '..':
self._set_cwd(os.path.abspath(os.path.join(self.cwd, '..')))
else:
if component in self._current_children():
self._set_cwd(os.path.join(self.cwd, component))
else:
self.perror("Node not found: '{}'\n".format(component))
def do_ls(self, args):
''' List all nodes under the current path '''
args, opts = self._parse_args(args)
current_children = {node_id: self._nodes[node_id]
for node_id in self._current_children()}
id_width = 1
type_width = 1
for node_id, node in current_children.items():
id_width = max(id_width, len(node_id))
type_width = max(type_width, len(node['type']))
sorted_children = sorted(current_children.items(),
key=lambda n: n[1]['type'])
for node_id, node in sorted_children:
output_line = "{}".format(node_id)
if 'l' in opts:
node = self._nodes[node_id]
output_line = '{0:<{id_width}} {1:<{type_width}} {2}'.format(
node_id, node['type'], node['label'],
id_width=id_width, type_width=type_width)
self.poutput(output_line)
def do_info(self, args):
''' Print information about the current node '''
args, opts = self._parse_args(args)
node = self.graph
if self.cwd == '/':
self.poutput("CURRENT GRAPH: {} ('{}')".format(node.get('label', ''), self.filepath))
self.poutput("GRAPH TYPE: {}".format(node.get('type')))
self.poutput("NODES: {}".format(len(node['nodes'])))
self.poutput("EDGES: {}".format(len(node['edges'])))
else:
node = self._current_node()
self.poutput("NODE ID: {}".format(node['id']))
self.poutput("NODE TYPE: {}".format(node['type']))
self.poutput("NODE LABEL: {}".format(node.get('label', '')))
meta_output = json.dumps(node.get('metadata', {}), indent=4)
self.poutput(meta_output)
def do_find(self, args):
''' Find all paths to a given node ID '''
args, opts = self._parse_args(args)
if args:
search_id = args[0].upper()
if search_id in self._nodes:
for path in self._iter_paths_for(search_id):
self._last_item = path
self.poutput(path)
def do_grep(self, args):
'''
Show IDs of nodes that match all of the supplied attributes. E.g:
> grep node_type=food available_to
shows all nodes of type 'food' that have an 'available_to' attribute
(regardless of its value)
'''
args, opts = self._parse_args(args)
attrs = {}
for arg in args:
attr, _, val = arg.partition('=')
attrs[attr] = val
for node_id, node in self._nodes.items():
matches = self._match(node, attrs)
if matches:
self.poutput(node_id)
for attr, val in matches.items():
self.poutput("\t{} = {}".format(attr, val))
self.poutput("")
def do_explain(self, args):
''' Display the names of all nodes in the current path. '''
components = self.cwd.split('/')
prefix = ""
for component in components:
if component:
self.poutput(prefix + self._nodes[component].get('label', component))
prefix += '\t'
``` |
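The REPL class has no launcher in this file; a minimal sketch of starting it is shown below (the graph filename is a placeholder, and `cmdloop()` is the standard `cmd2.Cmd` entry point).

```python
# Hedged launcher sketch for JSONRepl; "graph.json" is a placeholder path.
import sys

from jgrepl.repl import JSONRepl

if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "graph.json"
    JSONRepl(path).cmdloop()
```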
{
"source": "a115/python-dating",
"score": 4
} |
#### File: python-dating/dating/ranges.py
```python
from arrow import Arrow, get as get_arrow
class DateTimeRange:
""" A range class representing data and operations between two points in time. """
def __init__(self, start=None, end=None):
""" Initialise a new DateTimeRange from the given start and end Arrow objects. """
self._start_arrow = start or Arrow.min
self._end_arrow = end or Arrow.max
self._pointer = None
@classmethod
def from_strings(cls, start_str=None, end_str=None):
""" Initialise a new DateTimeRange from the given start and end datetime strings. """
return cls(start=get_arrow(start_str) if start_str else None,
end=get_arrow(end_str) if end_str else None)
@classmethod
def from_datetimes(cls, start_datetime, end_datetime):
""" Initialise a new DateTimeRange from the given start and end datetime objects. """
return cls(start=Arrow.fromdatetime(start_datetime) if start_datetime else None,
end=Arrow.fromdatetime(end_datetime) if end_datetime else None)
@classmethod
def month_for_arrow(cls, an_arrow):
""" Return the start and end of the month, in which the Arrow object belongs. """
return cls(*an_arrow.span('month'))
@classmethod
def month_for_datetime(cls, a_datetime):
""" Return the start and end of the month, in which the datetime object belongs. """
return cls.month_for_arrow(Arrow.fromdatetime(a_datetime))
@property
def start_arrow(self):
return self._start_arrow
@property
def end_arrow(self):
return self._end_arrow
@property
def start_datetime(self):
return self._start_arrow.datetime
@property
def end_datetime(self):
return self._end_arrow.datetime
@property
def start_date(self):
return self._start_arrow.date()
@property
def end_date(self):
return self._end_arrow.date()
def __str__(self):
return "[{} - {}]".format(self._start_arrow, self._end_arrow)
def __repr__(self):
return self.__str__()
def is_well_defined(self):
return ((self._start_arrow > Arrow.min) and
(self._end_arrow < Arrow.max))
def __contains__(self, a_datetime):
return a_datetime >= self._start_arrow and a_datetime <= self._end_arrow
def __iter__(self):
self._pointer = self._start_arrow
return self
def __next__(self):
if self._pointer <= self._end_arrow:
ret = self._pointer
self._pointer = self._pointer.shift(days=1)
return ret
raise StopIteration
def __eq__(self, other):
return ((self._start_arrow == other.start_arrow) and
(self._end_arrow == other.end_arrow))
def __lt__(self, other):
return ((self._start_arrow < other.start_arrow) or
((self._start_arrow == other.start_arrow) and (self._end_arrow < other.end_arrow)))
def __le__(self, other):
return (self._start_arrow <= other.start_arrow) and (self._end_arrow <= other.end_arrow)
def __gt__(self, other):
return ((self._start_arrow > other.start_arrow) or
(self._start_arrow == other.start_arrow) and (self._end_arrow > other.end_arrow))
def __ge__(self, other):
return (self._start_arrow >= other.start_arrow) and (self._end_arrow >= other.end_arrow)
```
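A short usage sketch of `DateTimeRange` (the dates are illustrative); it exercises the constructors, the membership test and the day-by-day iteration defined above.

```python
# Minimal sketch of DateTimeRange usage; the dates below are arbitrary.
from arrow import Arrow, get as get_arrow

from dating.ranges import DateTimeRange

feb = DateTimeRange.from_strings("2017-02-01", "2017-02-28")
print(get_arrow("2017-02-14") in feb)   # True -- __contains__ compares against the Arrow bounds
print(get_arrow("2017-03-01") in feb)   # False
print(len(list(feb)))                   # 28 -- iteration yields one Arrow per day, inclusive

open_ended = DateTimeRange(start=Arrow(2017, 2, 1))
print(open_ended.is_well_defined())     # False -- the missing end defaults to Arrow.max
```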
#### File: python-dating/tests/test_dating_ranges.py
```python
import unittest
from datetime import date, datetime
from itertools import islice
from random import shuffle
from arrow import Arrow, get as get_arrow
from dating.ranges import DateTimeRange
class TestDateTimeRangeWithArrows(unittest.TestCase):
""" Testing the DateTimeRange class with Arrow objects """
def setUp(self):
self.now = Arrow.utcnow()
self.start = self.now
self.end = self.now.shift(days=15)
self.before_start = self.now.shift(days=-1)
self.after_end = self.end.shift(days=1)
self.dtr = DateTimeRange(self.start, self.end)
self.left_open_dtr = DateTimeRange(None, self.end)
self.right_open_dtr = DateTimeRange(self.start, None)
def test_str(self):
expected_str = "[{} - {}]".format(self.start, self.end)
self.assertEqual(str(self.dtr), expected_str)
def test_arrow_properties(self):
self.assertEqual(self.dtr.start_arrow, self.start)
self.assertEqual(self.dtr.end_arrow, self.end)
def test_datetime_properties(self):
self.assertEqual(self.dtr.start_datetime, self.start.datetime)
self.assertEqual(self.dtr.end_datetime, self.end.datetime)
def test_date_properties(self):
self.assertEqual(self.dtr.start_date, self.start.date())
self.assertEqual(self.dtr.end_date, self.end.date())
def test_string_constructor_with_datetimes(self):
start_str = "2017-01-12T14:25:10"
end_str = "2017-02-15T07:00:01"
dtr1 = DateTimeRange.from_strings(start_str, end_str)
self.assertEqual(dtr1.start_datetime, get_arrow(start_str))
self.assertEqual(dtr1.end_datetime, get_arrow(end_str))
def test_string_constructor_with_dates(self):
dtr1 = DateTimeRange.from_strings("2017-01-12", "2017-02-15")
self.assertEqual(dtr1.start_date, date(2017, 1, 12))
self.assertEqual(dtr1.end_date, date(2017, 2, 15))
def test_month_for_arrow(self):
mdtr = DateTimeRange.month_for_arrow(Arrow(2017, 2, 14, 15, 30))
self.assertEqual(mdtr.start_datetime, Arrow(2017, 2, 1).floor('day').datetime)
self.assertEqual(mdtr.end_datetime, Arrow(2017, 2, 28).ceil('day').datetime)
def test_month_for_datetime(self):
mdtr = DateTimeRange.month_for_datetime(datetime(2017, 2, 14, 15, 30))
self.assertEqual(mdtr.start_datetime, Arrow(2017, 2, 1).floor('day').datetime)
self.assertEqual(mdtr.end_datetime, Arrow(2017, 2, 28).ceil('day').datetime)
def test_dtr_membership(self):
self.assertTrue(self.start in self.dtr)
self.assertTrue(self.start.shift(days=1) in self.dtr)
self.assertTrue(self.end in self.dtr)
self.assertFalse(self.before_start in self.dtr)
self.assertFalse(self.after_end in self.dtr)
def test_open_dtr_membership(self):
self.assertTrue(self.before_start in self.left_open_dtr)
self.assertTrue(self.after_end in self.right_open_dtr)
self.assertFalse(self.after_end in self.left_open_dtr)
self.assertFalse(self.before_start in self.right_open_dtr)
def test_is_well_defined(self):
self.assertTrue(self.dtr.is_well_defined())
self.assertFalse(self.left_open_dtr.is_well_defined())
self.assertFalse(self.right_open_dtr.is_well_defined())
def test_iterator(self):
expected_dates = [self.start.shift(days=x) for x in range(16)]
self.assertEqual(list(self.dtr), expected_dates)
self.assertEqual(list(islice(self.right_open_dtr, 16)), expected_dates)
def test_eq(self):
dtr2 = DateTimeRange.from_datetimes(self.start.datetime, self.end.datetime)
self.assertEqual(self.dtr, dtr2)
def test_gt(self):
dtr2 = DateTimeRange(self.before_start)
self.assertTrue(self.dtr > dtr2)
dtr3 = DateTimeRange(self.start.shift(days=2))
self.assertTrue(dtr3 > self.dtr)
def test_lt(self):
dtr2 = DateTimeRange(self.before_start)
self.assertTrue(dtr2 < self.dtr)
dtr3 = DateTimeRange(self.start)
self.assertTrue(self.dtr < dtr3)
def test_gte(self):
dtr2 = DateTimeRange(self.start, self.start.shift(days=12))
self.assertTrue(self.dtr >= dtr2)
def test_lte(self):
dtr2 = DateTimeRange(self.start.shift(days=1), self.start.shift(days=15))
self.assertTrue(self.dtr <= dtr2)
def test_sort(self):
dtr1 = DateTimeRange(self.before_start)
dtr2 = DateTimeRange(self.start.shift(days=1), self.start.shift(days=15))
ranges = [self.dtr, dtr2, dtr1, self.left_open_dtr, self.right_open_dtr]
shuffle(ranges)
self.assertEqual(sorted(ranges),
[self.left_open_dtr, dtr1, self.dtr, self.right_open_dtr, dtr2])
``` |
{
"source": "a11ce/playahighpitchedsound",
"score": 3
} |
#### File: a11ce/playahighpitchedsound/pyTone.py
```python
from __future__ import division
import math
from pyaudio import PyAudio # sudo apt-get install python{,3}-pyaudio
try:
from itertools import izip
except ImportError: # Python 3
izip = zip
xrange = range
def sine_tone(frequency, duration, volume=1, sample_rate=22050):
n_samples = int(sample_rate * duration)
restframes = n_samples % sample_rate
p = PyAudio()
stream = p.open(format=p.get_format_from_width(1), # 8bit
channels=1, # mono
rate=sample_rate,
output=True)
s = lambda t: volume * math.sin(2 * math.pi * frequency * t / sample_rate)
samples = (int(s(t) * 0x7f + 0x80) for t in xrange(n_samples))
for buf in izip(*[samples]*sample_rate): # write several samples at a time
stream.write(bytes(bytearray(buf)))
# fill remainder of frameset with silence
stream.write(b'\x80' * restframes)
stream.stop_stream()
stream.close()
p.terminate()
``` |
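A hedged example of calling the helper above (it assumes pyTone.py is importable and that PyAudio can open an output device on this machine).

```python
# Minimal sketch: play concert A (440 Hz) for one second at half volume.
from pyTone import sine_tone

sine_tone(frequency=440, duration=1.0, volume=0.5)
```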
{
"source": "A11en0/Bert-Text-Classification",
"score": 3
} |
#### File: A11en0/Bert-Text-Classification/model.py
```python
import torch
from torch import nn
import transformers as ppb
from torch.autograd import Variable
from transformers import DistilBertModel
class BiLSTM(nn.Module):
def __init__(self, embedding_dim, hidden_dim, dropout=0.5, output_size=2):
super(BiLSTM, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
# self.num_layers = num_layers
self.dropout = dropout
self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=hidden_dim, bidirectional=True)
self.W = nn.Linear(hidden_dim * 2, embedding_dim, bias=False)
self.b = nn.Parameter(torch.ones([embedding_dim]))
def forward(self, X):
input = X.transpose(0, 1) # input : [n_step, batch_size, n_class]
hidden_state = torch.zeros(1*2, len(X), self.hidden_dim) # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
cell_state = torch.zeros(1*2, len(X), self.hidden_dim) # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
outputs = outputs[-1] # [batch_size, n_hidden]
model = self.W(outputs) + self.b # model : [batch_size, n_class]
return model
class LSTM_Net(nn.Module):
def __init__(self, embedding, embedding_dim, hidden_dim, num_layers, dropout=0.5,
fix_embedding=True):
super(LSTM_Net, self).__init__()
self.embedding = torch.nn.Embedding(embedding.size(0),embedding.size(1))
self.embedding.weight = torch.nn.Parameter(embedding)
        # Whether to freeze the embedding; if fix_embedding is False, the embedding is trained along with the rest of the model
self.embedding.weight.requires_grad = False if fix_embedding else True
self.embedding_dim = embedding.size(1)
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True)
self.classifier = nn.Sequential( nn.Dropout(dropout),
nn.Linear(hidden_dim, 5),
nn.Softmax() )
def forward(self, inputs):
inputs = self.embedding(inputs)
x, _ = self.lstm(inputs, None)
        # x has dimension (batch, seq_len, hidden_size)
        # take the hidden state of the last LSTM time step
x = x[:, -1, :]
x = self.classifier(x)
return x
class Bert_Net(nn.Module):
# def __init__(self, bert_config, tagset_size, embedding_dim, hidden_dim, rnn_layers, dropout_ratio, dropout1, use_cuda=False):
def __init__(self, embedding_dim, hidden_dim, num_layers, dropout=0.5, output_size=2):
super(Bert_Net, self).__init__()
self.pretrained_weights = 'distilbert-base-uncased'
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = nn.Dropout(p=dropout)
# self.crf = CRF(target_size=tagset_size, average_batch=True, use_cuda=use_cuda)
# self.liner = nn.Linear(hidden_dim*2, tagset_size+2)
# self.model_class, self.tokenizer_class, self.pretrained_weights = (
# ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
# self.word_embeds = BertModel.from_pretrained(bert_config)
self.word_embeds = DistilBertModel.from_pretrained(self.pretrained_weights)
self.gama = 0.5
da = hidden_dim
db = int(da/2)
for param in self.word_embeds.parameters():
param.requires_grad = False
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=num_layers, batch_first=True)
# self.W1 = nn.Linear(2 * hidden_dim + embedding_dim,
# da) # (da, 2u+d) => (hidden_size, embedding_dim+2*hidden_size)
# self.w1 = nn.Linear(da, 1, bias=False)
# self.W2 = nn.Linear(embedding_dim, db)
# self.w2 = nn.Linear(db, 1, bias=False)
# self.output = nn.Linear(2 * hidden_dim + embedding_dim, output_size)
self.classifier = nn.Sequential(self.dropout,
nn.Linear(hidden_dim, 2),
nn.Softmax())
# def self_attention(self, H):
# # H: batch_size, seq_len, 2*hidden_size
# hidden_size = H.size()[-1]
# Q = H
# K = H
# V = H
# atten_weight = F.softmax(torch.bmm(Q, K.permute(0, 2, 1))/math.sqrt(hidden_size), -1) # batch_size, seq_len, seq_len
# A = torch.bmm(atten_weight, V) # batch_size, seq_len, 2*hidden_size
# A = A.permute(0, 2, 1) # batch_size, 2*hidden_size, seq_len
# # q: short text representation
# q = F.max_pool1d(A, A.size()[2]).squeeze(-1) # batch_size, 2*hidden_size ==> (128, 128, 1).squeeze(-1) -> (128, 128)
# return q
#
# def cst_attention(self, c, q):
# # c: batch_size, concept_seq_len, embedding_dim
# # q: batch_size, 2*hidden_size
# # print(q.size())
# # print(c.size())
# q = q.unsqueeze(1)
# q = q.expand(q.size(0), c.size(1), q.size(2))
# c_q = torch.cat((c, q), -1) # batch_size, concept_seq_len, embedding_dim+2*hidden_size
# c_q = self.w1(F.tanh(self.W1(c_q))) # batch_size, concept_seq_len, 1
# alpha = F.softmax(c_q.squeeze(-1), -1) # batch_size, concept_seq_len
#
# return alpha
#
# def ccs_attention(self, c):
# # c: batch_size, concept_seq_len, embedding_dim
# c = self.w2(F.tanh(self.W2(c))) # batch_size, concept_seq_len, 1
# beta = F.softmax(c.squeeze(-1), -1) # batch_size, concept_seq_len
#
# return beta
def forward(self, sentence, attention_mask=None):
'''
args:
            sentence (batch_size, seq_len): token ids of the input sentences
            attention_mask: mask marking real tokens vs. padding
        return:
            class probabilities over the two classes, shape (batch_size, 2)
'''
embeds = self.word_embeds(sentence, attention_mask=attention_mask)[0]
out, hidden = self.lstm(embeds)
out = out[:, -1, :]
out = self.classifier(out)
return out
# def forward(self, sentence, attention_mask=None):
# '''
# args:
# sentence (word_seq_len, batch_size) : word-level representation of sentence
# hidden: initial hidden state
# return:
# crf output (word_seq_len, batch_size, tag_size, tag_size), hidden
# '''
# # batch_size = sentence.size(0)
# # seq_length = sentence.size(1)
#
# embeds = self.word_embeds(sentence, attention_mask=attention_mask)[0]
# out, hidden = self.lstm(embeds)
# out = out[:, -1, :]
# out = self.classifier(out)
# return out
``` |
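A hedged sketch of a forward pass through `Bert_Net`; the tokenizer name matches the hard-coded DistilBERT weights, while the hidden size and example sentences are assumptions.

```python
# Minimal sketch, assuming the `transformers` tokenizer API shown below;
# embedding_dim=768 matches distilbert-base-uncased, hidden_dim is arbitrary.
import torch
from transformers import DistilBertTokenizer

from model import Bert_Net

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
net = Bert_Net(embedding_dim=768, hidden_dim=256, num_layers=1)
net.eval()  # disable dropout for inference

batch = tokenizer(["a great movie", "a terrible movie"],
                  padding=True, return_tensors="pt")
with torch.no_grad():
    probs = net(batch["input_ids"], attention_mask=batch["attention_mask"])
print(probs.shape)  # torch.Size([2, 2]) -- softmax over the two classes
```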
{
"source": "a11smiles/Azure-Backlog-Generator",
"score": 2
} |
#### File: tests/helpers/test_validation.py
```python
from mock import Mock, MagicMock
from tests.mockedfiles import MockedFiles
import azbacklog.helpers as helpers
def test_validateMetadata():
v = helpers.Validation()
assert v.validateMetadata('./somepath/metadata.json', None, MockedFiles._mockConfig()) == (False, "metadata in './somepath/metadata.json' is empty")
v._validateTitle = MagicMock(return_value=(False, "no title"))
assert v.validateMetadata('./somepath/metadata.json', {
'description': 'lorem desc',
'tags': ['01_Folder'],
'roles': []
}, MockedFiles._mockConfig()) == (False, "no title")
v._validateTitle = MagicMock(return_value=True)
v._validateDescription = MagicMock(return_value=(False, "no description"))
assert v.validateMetadata('./somepath/metadata.json', {
'title': 'lorem ipsum',
'tags': ['01_Folder'],
'roles': []
}, MockedFiles._mockConfig()) == (False, "no description")
v._validateTitle = MagicMock(return_value=True)
v._validateDescription = MagicMock(return_value=True)
v._validateTags = MagicMock(return_value=(False, "no tags"))
assert v.validateMetadata('./somepath/metadata.json', {
'title': 'lorem ipsum',
'description': 'lorem desc',
'roles': []
}, MockedFiles._mockConfig()) == (False, "no tags")
v._validateTitle = MagicMock(return_value=True)
v._validateDescription = MagicMock(return_value=True)
v._validateTags = MagicMock(return_value=True)
v._validateRoles = MagicMock(return_value=(False, "no roles"))
assert v.validateMetadata('./somepath/metadata.json', {
'title': 'lorem ipsum',
'description': 'lorem desc',
'tags': ['01_Folder']
}, MockedFiles._mockConfig()) == (False, "no roles")
v._validateTitle = MagicMock(return_value=True)
v._validateDescription = MagicMock(return_value=True)
v._validateTags = MagicMock(return_value=True)
v._validateRoles = MagicMock(return_value=True)
assert v.validateMetadata('./somepath/metadata.json', {
'title': 'lorem ipsum',
'description': 'lorem desc',
'tags': ['01_Folder'],
'roles': []
}, MockedFiles._mockConfig()) is True
def test_validateTitle():
v = helpers.Validation()
assert v._validateTitle('./somepath/metadata.json', {}) == (False, "'title' property not found in metadata './somepath/metadata.json'")
assert v._validateTitle('./somepath/metadata.json', {'title': ''}) == (False, "'title' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateTitle('./somepath/metadata.json', {'title': 10}) == (False, "'title' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateTitle('./somepath/metadata.json', {'title': ' '}) == (False, "'title' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateTitle('./somepath/metadata.json', {'title': 'lorem ipsum'}) == (True)
def test_validateDescription():
v = helpers.Validation()
assert v._validateDescription('./somepath/metadata.json', {}) == (False, "'description' property not found in metadata './somepath/metadata.json'")
assert v._validateDescription('./somepath/metadata.json', {'description': ''}) == (False, "'description' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateDescription('./somepath/metadata.json', {'description': 10}) == (False, "'description' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateDescription('./somepath/metadata.json', {'description': ' '}) == (False, "'description' property not formatted correctly in metadata './somepath/metadata.json'")
assert v._validateDescription('./somepath/metadata.json', {'description': 'lorem ipsum'}) == (True)
def test_validateTags():
v = helpers.Validation()
assert v._validateTags('./somepath/metadata.json', {}, MockedFiles._mockConfig()) == (False, "'tags' property not found in metadata './somepath/metadata.json'")
assert v._validateTags('./somepath/metadata.json', {'tags': 'lorem ipsum'}, MockedFiles._mockConfig()) == (False, "'tags' property is not in correct format in metadata './somepath/metadata.json'")
assert v._validateTags('./somepath/metadata.json', {'tags': ['lorem ipsum']}, MockedFiles._mockConfig()) == (False, "invalid tag 'lorem ipsum' in metadata './somepath/metadata.json'")
assert v._validateTags('./somepath/metadata.json', {'tags': ['01_Folder']}, MockedFiles._mockConfig()) is True
def test_validateRoles():
v = helpers.Validation()
assert v._validateRoles('./somepath/metadata.json', {}, MockedFiles._mockConfig()) == (False, "'roles' property not found in metadata './somepath/metadata.json'")
assert v._validateRoles('./somepath/metadata.json', {'roles': 'lorem ipsum'}, MockedFiles._mockConfig()) == (False, "'roles' property is not in correct format in metadata './somepath/metadata.json'")
assert v._validateRoles('./somepath/metadata.json', {'roles': ['lorem ipsum']}, MockedFiles._mockConfig()) == (False, "invalid role 'lorem ipsum' in metadata './somepath/metadata.json'")
assert v._validateRoles('./somepath/metadata.json', {'roles': ['AppDev']}, MockedFiles._mockConfig()) is True
def test_validateConfig():
v = helpers.Validation()
assert v.validateConfig('./somepath/config.json', None) == (False, "configuration in './somepath/config.json' is empty")
assert v.validateConfig('./somepath/config.json', {'foo': 'bar'}) == (False, "value 'foo' not allowed in configuration './somepath/config.json'")
assert v.validateConfig('./somepath/config.json', {'roles': ['AppDev']}) == (False, "expected value 'tags' not found in configuration './somepath/config.json'")
assert v.validateConfig('./somepath/config.json', {'tags': ['0f_Folder'], 'roles': ['AppDev']}) is True
``` |
{
"source": "a11to1n3/VMDStatFilterInDeepLearning4WinEnData",
"score": 4
} |
#### File: a11to1n3/VMDStatFilterInDeepLearning4WinEnData/dataFilter.py
```python
from numpy import mean
from numpy import std
def filterWithConfidenceLevel(data_all,confidence_level):
"""Summary or Description of the Function
this function filters the data by a given confidence level.
Parameters:
data_all (array): the data about to be filtered
confidence_level (int): given confidence level ranging from 90 to 99.99932
Returns:
data_all (array): the filtered data
"""
    # z-scores (sigma multipliers) for the supported confidence levels
    sigma_by_confidence = {
        90: 1.645, 91: 1.695, 92: 1.75, 93: 1.81, 94: 1.88,
        95: 1.96, 96: 2.05, 97: 2.17, 98: 2.33, 99: 2.58,
        99.73: 3, 99.99366: 4, 99.99932: 4.5,
    }
    if confidence_level not in sigma_by_confidence:
        raise ValueError("unsupported confidence level: {}".format(confidence_level))
    sigma = sigma_by_confidence[confidence_level]
# calculate summary statistics
data_mean, data_std = mean(data_all[:,:,0].reshape(-1)), std(data_all[:,:,0].reshape(-1))
#data_min = min(data_all[:,:,0].reshape(-1))
#data_max = max(data_all[:,:,0].reshape(-1))
# identify outliers
cut_off = data_std * sigma
lower, upper = data_mean - cut_off, data_mean + cut_off
for i in range(data_all.shape[0]):
for j in range(data_all.shape[1]):
if data_all[i,j,0] < lower:
data_all[i,j,0]= lower
elif data_all[i,j,0] > upper:
data_all[i,j,0] = upper
return data_all
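# Quick usage sketch on synthetic data. The (samples, hours, features) layout is
# assumed from the indexing above; only channel 0 is filtered.
if __name__ == '__main__':
    import numpy as np
    demo = np.random.randn(100, 24, 3)
    demo[0, 0, 0] = 50.0                      # inject an obvious outlier
    filtered = filterWithConfidenceLevel(demo, 95)
    print(filtered[0, 0, 0])                  # clipped to the 95% upper bound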
```
#### File: a11to1n3/VMDStatFilterInDeepLearning4WinEnData/dataWrangling.py
```python
import pandas as pd
import numpy as np
def csvToArray(filename):
"""Summary or Description of the Function
Parameters:
    filename (str): the name of the input file (with csv extension)
    Returns:
    data (array): the wrangled data, shaped (rows, 24, 133)
"""
# %% Load data
df = pd.read_csv(filename)
# %% Create array
data = np.zeros((len(df),24,133))
#data = np.zeros((3299,24,133))
#Set holiday variable
    for h in range(24):
        data[:,h,-1] = df.values[:,-2]
#Set hour variable
    for h in range(24):
        data[:,h,h+1] = 1
#Set hourly values
    for h in range(24):
        data[:,h,0] = df.values[:,h+2]
    #for i in range(len(df)):
    #    data[i,21,0] = float(df.values[i,23])
    #    data[i,22,0] = float(df.values[i,24])
#Set weekday & day indices
    for i in range(len(df)):
        # weekday one-hot: 'CN' maps to index 31, numeric weekdays map to index value+23
        if df.values[i,0] == 'CN':
            data[i,:,31] = 1
        else:
            a = int(df.values[i,0])+23
            data[i,:,a] = 1
        b = int(df.values[i,1].split('/')[1])+31
        data[i,:,b] = 1
        c = int(df.values[i,-1])+62
        data[i,:,c] = 1
        d = int(df.values[i,1].split('/')[0])+31
        data[i,:,d] = 1
        # coarse bucket one-hot at indices 128-131, derived from d
        if d-115 in [1,2,3]:
            data[i,:,128] = 1
        elif d-115 in [4,5,6]:
            data[i,:,129] = 1
        elif d-115 in [7,8,9]:
            data[i,:,130] = 1
        else:
            data[i,:,131] = 1
return data
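# Usage sketch. The CSV layout is inferred from the indexing above: weekday in
# column 0 ('CN' or a number), a '/'-separated date string in column 1, 24 hourly
# values in columns 2-25, and a holiday flag in the second-to-last column;
# 'wind_energy.csv' is a hypothetical file name.
if __name__ == '__main__':
    arr = csvToArray('wind_energy.csv')
    print(arr.shape)  # (number of rows in the CSV, 24, 133)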
```
#### File: a11to1n3/VMDStatFilterInDeepLearning4WinEnData/waveletFunction.py
```python
import pywt
import numpy as np
def waveletFunc(signal, wavelet="db2", level=1):
    # note: the decomposition level is effectively fixed to 1, since the
    # two-way unpacking below expects exactly one approximation and one detail band
    coeff = pywt.wavedec(signal, wavelet, level=1)
cA1, cD1 = coeff
cD11 = np.zeros_like(cD1)
cA11 = np.zeros_like(cA1)
coeff1 = list([cA1,cD11])
coeff2 = list([cA11,cD1])
highCancelledSignal = pywt.waverec(coeff1, wavelet)
lowCancelledSignal = pywt.waverec(coeff2, wavelet)
return highCancelledSignal, lowCancelledSignal, cA1, cD1
def extract(data_load):
high_end = data_load.copy()
low_low_end = data_load.copy()
low_high_end = data_load.copy()
_,high_end_tmp, low_init, high_init = waveletFunc(data_load[:,:,0].reshape(-1))
high_end[:,:,0] = high_end_tmp.reshape(-1,24)
low_low_init, low_high_init,_,_ = waveletFunc(low_init)
low_low_coeff = list([low_low_init, high_init])
low_high_coeff = list([low_high_init, high_init])
low_low_end[:,:,0] = pywt.waverec(low_low_coeff,'db2').reshape(-1,24)
low_high_end[:,:,0] = pywt.waverec(low_high_coeff,'db2').reshape(-1,24)
return high_end, low_low_end, low_high_end
``` |
{
"source": "a1247418/MT18_LH_human-sleep-classification",
"score": 2
} |
#### File: MT18_LH_human-sleep-classification/bin/classify.py
```python
import os
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname('__file__'), '..'))
sys.path.insert(0, root_dir)
from sleeplearning.lib.base import Base
import argparse
from cfg.config import *
import numpy as np
def main(args):
clf = Base(cuda=torch.cuda.is_available(), verbose=False)
clf.restore(args.model)
print("channels: \n", [c[0] for c in clf.ds['channels']])
prediction = clf.predict(args.subject)
subject_name = os.path.basename(os.path.normpath(args.subject))
output_path = os.path.join(args.output_dir, subject_name + '.csv')
np.savetxt(output_path, prediction, delimiter=",", fmt='%i')
print(f"Prediction saved to {output_path}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SleepLearning Validation')
parser.add_argument('--model',
default='../models/cv_sleepedf_Fz_2D_singlechanexp2_6464962FC_MP/fold0/checkpoint.pth.tar',
required=False,
                        help='file or folder of pytorch model (*.pth.tar)')
parser.add_argument('--subject',
default='../data/sleepedf/SC4001E0-PSG',
help='subject file to predict')
parser.add_argument('--output_dir',
default='',
help='folder where predictions are saved')
args = parser.parse_args()
main(args)
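# Example invocation (the paths below are placeholders, not files shipped with the repo):
#   python classify.py --model models/checkpoint.pth.tar \
#                      --subject data/sleepedf/SC4001E0-PSG --output_dir predictions/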
```
#### File: MT18_LH_human-sleep-classification/bin/validate.py
```python
import os
import shutil
import pandas as pd
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname('__file__'), '..'))
sys.path.insert(0, root_dir)
from sleeplearning.lib.base import Base
import sleeplearning.lib.utils as utils
import argparse
from cfg.config import *
import numpy as np
import glob
def main(args):
subjects = pd.read_csv(args.subject_csv, header=None)[
0].dropna().tolist()
if os.path.isdir(args.model):
models = glob.glob(args.model + '/*')
else:
models = [args.model]
for model in models:
clf = Base(cuda=torch.cuda.is_available(), verbose=True)
clf.restore(model)
print("channels: \n", [c[0] for c in clf.ds['channels']])
if args.channel_drop is not None:
channel_drop = tuple(map(float, args.channel_drop[1:-1].split(',')))
suffix = '_'+args.channel_drop
else:
channel_drop = None
suffix = ''
output_dir = os.path.join(args.output_dir, os.path.basename(
os.path.normpath(model))+suffix)
if os.path.exists(output_dir) and os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("last acc: ", clf.last_acc, "best acc: ", clf.best_acc_)
channel_accuracies = np.array([])
for subject in subjects:
output, metrics = utils.get_model_output(clf, args.data_dir,
subject, channel_drop)
accuracy = metrics['top1'].avg
savedict = dict(subject=subject, acc=accuracy)
for k, v in output.items():
savedict[k] = v
np.savez(os.path.join(output_dir, subject), **savedict)
channel_accuracies = np.append(channel_accuracies, accuracy)
print("\n{}: {}\n".format(subject, accuracy))
print("mean acc: ", np.mean(channel_accuracies))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SleepLearning Validation')
parser.add_argument('--model',
default='../models/3008_Attnet_ChannelDrop0.1.pth.tar',
required=False,
                        help='file or folder of pytorch model (*.pth.tar)')
parser.add_argument('--data_dir',
default='/cluster/scratch/hlinus/physionet-challenge-train/',
help='folder containing psg files')
parser.add_argument('--subject_csv',
default='../cfg/physionet18/test_rs50_0.csv',
help='csv file containing validation/test subjects')
parser.add_argument('--output_dir',
default='/cluster/scratch/hlinus/AttentionNet',
help='folder where predictions are saved')
parser.add_argument('--channel_drop',
required=False,
help='tuple of channel dropout probabilities')
args = parser.parse_args()
main(args)
```
#### File: sleeplearning/lib/evaluation.py
```python
import itertools
import os
import shutil
import sys
import glob
from collections import defaultdict
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, \
accuracy_score, cohen_kappa_score
from sklearn.metrics import f1_score
from matplotlib import gridspec
import seaborn as sns
CLASSES = ['W', 'N1', 'N2', 'N3', 'REM']
def get_basename_(path):
name = os.path.basename(os.path.normpath(path))
    # cut off the ordering-number prefix
if len(name)>1 and name[1] == '_':
name = name.split("_")[-1]
return name
def cm_figure_(prediction, truth, classes, configuration_name):
classes = classes.copy()
cm = confusion_matrix(truth, prediction, labels=range(len(classes)))
num_classes = cm.shape[0]
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
len(classes)))).T.round(2)
cm_norm = cm.astype('float') * 1 / (cm.sum(axis=1)[:, np.newaxis]+1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig = plt.figure(figsize=(3, 2), dpi=320, facecolor='w',
edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
im = ax.imshow(
np.concatenate((cm_norm, np.zeros((len(classes), 4))), axis=1),
cmap='Oranges')
classes += ['PR', 'RE', 'F1', 'S']
xtick_marks = np.arange(len(classes))
ytick_marks = np.arange(len(classes) - 4)
ax.set_xlabel('Predicted', fontsize=5, weight='bold')
ax.set_xticks(xtick_marks)
c = ax.set_xticklabels(classes, fontsize=5, ha='center')
#ax.xaxis.set_label_position('top')
#ax.xaxis.tick_top()
ax.set_ylabel('True Label', fontsize=5, weight='bold')
ax.set_yticks(ytick_marks)
ax.set_yticklabels(classes[:-4], fontsize=5, va='center')
ax.yaxis.set_label_position('left')
ax.yaxis.tick_left()
ax.set_title(configuration_name, fontsize=5, horizontalalignment='center')
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, '{}\n({:.2f})'.format(cm[i, j], cm_norm[i, j]),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
for i, j in itertools.product(range(cm.shape[0]),
range(cm.shape[1], cm.shape[1] + 4)):
val = per_class_metrics[i, j - num_classes]
ax.text(j, i, val if j != cm.shape[1] + 3 else int(val),
horizontalalignment="center", fontsize=5,
verticalalignment='center', color="black")
return fig
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", xlabel=None, ylabel=None, **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
#cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
#cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=0, ha="center",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=["black", "white"],
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Arguments:
im : The AxesImage to be labeled.
Optional arguments:
data : Data used to annotate. If None, the image's data is used.
valfmt : The format of the annotations inside the heatmap.
This should either use the string format method, e.g.
"$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`.
textcolors : A list or array of two color specifications. The first is
used for values below a threshold, the second for those
above.
threshold : Value in data units according to which the colors from
textcolors are applied. If None (the default) uses the
middle of the colormap as separation.
Further arguments are passed on to the created text labels.
"""
import matplotlib
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center", fontsize=8)
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if data[i, j] <=1:
kw.update(color=textcolors[im.norm(data[i, j]) > threshold])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
else:
text = im.axes.text(j, i, "{:d}".format(int(data[i, j]), None),
**kw)
texts.append(text)
return texts
def table_plot_(table, yticks, xticks, agg_table: bool = True):
num_yticks = len(yticks)
# m.configs]
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*len(yticks)), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :])
# plt.suptitle(PREFIX, fontsize=12)
# ax1 = plt.subplot(211)#fig.add_subplot(2, 1, 1)
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto")
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks)
ax1.set_xticklabels([])
if agg_table:
ax2 = fig.add_subplot(gs[num_yticks + 1:, :])
ax2.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax2.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(['mean', 'std'])
ax1 = ax2
xtick_marks = np.arange(len(xticks))
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
return fig
def table_plot_folded_(table, yticks, xticks, agg_table: bool = False):
yticks = [y.replace("WESA_","").replace("_MLready.npz","") for y in yticks]
num_yticks = (len(yticks)+1) //2
max_yticks = len(yticks)
xticks = xticks + xticks
# m.configs]
min_val = min([min(t) for t in table])
max_val = max([max(t) for t in table])
aggs = np.stack([np.mean(table, 0), np.std(table, 0)], axis=0)
#fig = plt.figure(figsize=(8.27, 11.69), dpi=320, facecolor='w',
# edgecolor='k')
fig = plt.figure(figsize=(len(xticks), .5*num_yticks), dpi=120,
facecolor='w',
edgecolor='k')
gs = gridspec.GridSpec(num_yticks + 4, len(xticks))
ax1 = fig.add_subplot(gs[:num_yticks, :(len(xticks)//2)])
ax1.imshow(table[:num_yticks], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks),
range(table.shape[1])):
ax1.text(j, i, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ax2 = fig.add_subplot(gs[:num_yticks, (len(xticks)//2):])
ax2.imshow(table[num_yticks:], cmap='YlGn', aspect="auto", vmin=min_val, vmax=max_val)
for i, j in itertools.product(range(num_yticks, max_yticks),
range(table.shape[1])):
ax2.text(j, i-num_yticks, '{:.3f}'.format(table[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(num_yticks)
ax1.set_yticks(ytick_marks)
ax1.set_yticklabels(yticks[:num_yticks])
ax1.set_xticklabels([])
#plt.draw()
#yax = ax1.get_yaxis()
#pad = max(T.label.get_window_extent().width for T in yax.majorTicks)
#yax.set_tick_params(pad=pad)
ytick_marks = np.arange(num_yticks)
ax2.set_yticks(ytick_marks)
ax2.set_yticklabels(yticks[num_yticks:])
ax2.set_xticklabels([])
if agg_table:
ax3 = fig.add_subplot(gs[num_yticks + 1:, :])
ax3.imshow(aggs, cmap='YlGn', aspect="auto")
# ax2.set_aspect('equal', 'box')
# plt.imshow(table,cmap='Oranges')
for i, j in itertools.product(range(aggs.shape[0]),
range(aggs.shape[1])):
ax3.text(j, i, '{:.3f}'.format(aggs[i, j]),
horizontalalignment="center", fontsize=10,
verticalalignment='center', color="black")
ytick_marks = np.arange(2)
ax3.set_yticks(ytick_marks)
ax3.set_yticklabels(['mean', 'std'])
xtick_marks = np.arange(len(xticks) // 2)
ax3.set_xticks(xtick_marks)
ax3.set_xticklabels(xticks, rotation=60)
#ax1 = ax2
xtick_marks = np.arange(len(xticks)//2)
ax1.set_xticks(xtick_marks)
ax1.set_xticklabels(xticks, rotation=60)
ax1.tick_params(labelbottom=False, labeltop=True, labelleft=True, labelright=False,
bottom=False, top=True, left=True, right=False)
ax2.set_xticks(xtick_marks)
ax2.set_xticklabels(xticks, rotation=60)
ax2.tick_params(labelbottom=False, labeltop=True, labelleft=False, labelright=True,
bottom=False, top=True, left=False, right=True)
return fig
class Model(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"model {self.name}")
self.configs = [Configurations(p) for p in sorted(glob.glob(path + '/*'))]
class Runs(object):
def __init__(self, path):
self.name = get_basename_(path)
print(f"runs: {self.name}")
self.path = path
self.subjects = sorted(glob.glob(path + '/*'))
class Configurations(object):
def __init__(self, path):
self.name = get_basename_(path)
self.path = path
print(f"config: {self.name}")
self.runs = [Runs(p) for p in sorted(glob.glob(path + '/*'))]
class Evaluation(object):
def __init__(self, path):
self.path = path
self.models = [Model(p) for p in sorted(glob.glob(path + '/*'))]
def cm(self):
for i, model in enumerate(self.models):
runs = []
for config in model.configs:
runs.append(config.name)
truth = []
prediction = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
cm = confusion_matrix(truth, prediction,
labels=range(5))
cm_norm = cm.astype('float') * 1 / (
cm.sum(axis=1)[:, np.newaxis] + 1e-7)
cm_norm = np.nan_to_num(cm_norm, copy=True)
fig, (ax2) = plt.subplots(1, 1,
figsize=(2.5,2.5),
dpi=200) #
plt.subplots_adjust(hspace=.05)
fig.suptitle(get_basename_(model.name),
fontsize=8, weight="bold",y=0.93)
per_class_metrics = np.array(
precision_recall_fscore_support(truth, prediction, beta=1.0,
labels=range(
5))).round(
2)
#im = heatmap(per_class_metrics, ['PR', 'RE', 'F1', 'S'],
# ('W', 'N1', 'N2', 'N3', 'REM'),
# ax=ax1, cmap="YlGn", vmin=0,vmax=1e10,
# aspect='auto')
#texts = annotate_heatmap(im, valfmt="{x:.2f} ")
im = heatmap(cm_norm, ('W', 'N1', 'N2', 'N3', 'REM'),
('W', 'N1', 'N2', 'N3', 'REM'),
ax=ax2, cmap="YlGn", aspect='auto',
xlabel="Predicted Label", ylabel="True Label")
texts = annotate_heatmap(im, valfmt="{x:.2f} ")
#ax2.get_shared_x_axes().join(ax1, ax2)
#ax1.tick_params(axis="x", labelbottom=0)
#ax1.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=False, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=False) # labels along the bottom edge are off
try:
plt.savefig("cv_plots/cv_cm_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def boxplot(self, xlabel=None, ymin=.4):
models = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
configs = []
for config in model.configs:
configs.append(config.name)
if len(config.runs) == 0: continue
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
acc = result['acc']/100
rows.append([get_basename_(path), model.name, config.name,
acc])
df = pd.DataFrame(rows, columns=['subject', 'model', 'config',
'accuracy'])
fig, ax = plt.subplots(figsize=(6,4), dpi=120)
#ax.set_title("Subject-wise accuracy", fontsize=14)
ax = sns.boxplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('subject accuracy', fontsize=10)
def bar(self, xlabel=None, ymin=0.4):
models = []
means = []
stds = []
rows = []
for i, model in enumerate(self.models):
models.append(model.name)
runs = []
model_mean = []
model_std = []
for config in model.configs:
runs.append(config.name)
accs = np.array([])
for j, run in enumerate(config.runs):
truth = []
prediction = []
for path in run.subjects:
result = self.read_subject_file(path)
truth.append(result['y_true'])
prediction.append(result['y_pred'])
truth = list(itertools.chain.from_iterable(truth))
prediction = list(itertools.chain.from_iterable(prediction))
acc = accuracy_score(truth, prediction)
f1m = f1_score(truth, prediction, average='macro')
_, _, f1c, _ = precision_recall_fscore_support(truth,
prediction,
beta=1.0,
labels=range(
5))
kappa = cohen_kappa_score(truth, prediction)
rows.append(
[model.name, config.name, acc, f1m, kappa] + list(f1c))
accs = np.append(accs, acc)
model_mean.append(np.mean(accs))
model_std.append(np.std(accs))
means.append(model_mean)
stds.append(model_std)
cols = ['model', 'config',
'accuracy', 'f1m', 'kappa', 'W',
'N1', 'N2', 'N3', 'R']
df = pd.DataFrame(rows, columns=cols)
fig, ax = plt.subplots(figsize=(6, 4), dpi=120)
res = df.groupby(['model', 'config'], as_index=False)[cols].mean()
print(res.round(3).to_latex())
ax.set_title("Overall accuracy")
ax = sns.barplot(x="config", y="accuracy", hue="model", data=df,
#palette="Set3",
order=[c.name for c in self.models[0].configs])
ax.tick_params(labelsize=10)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=10)
else:
ax.set_xlabel("")
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=10)
ax.set_axisbelow(True)
ax.yaxis.grid(color='gray', linestyle='dashed')
ax.set_ylim(ymin=ymin, ymax=1)
ax.set_ylabel('accuracy', fontsize=10)
def hypnogram(self, index=0, models=None, config=None, start=None,
end=None):
models = self.models if models is None else [m for m in self.models
if m.name in models]
if len(models) == 0: raise ValueError("no matching models found!")
f, axarr = plt.subplots(len(models), 1, squeeze=False,
sharex=True, sharey=True,
figsize=(10, 3.5 * len(models)), dpi=320)
plt.yticks(range(5), ['W', 'N1', 'N2', 'N3', 'REM'], fontsize=10)
for i, model in enumerate(models):
cfg = model.configs[0] if config is None else\
next((item for item in model.configs if item.name == config),
None)
if cfg is None:
raise ValueError(f"config {config} not found")
run = cfg.runs[0]
path = run.subjects[index]
subject = get_basename_(path)
f.suptitle(f"{subject}", fontsize=12)
result = self.read_subject_file(path)
# only part of record
if start is None and end is None:
end = len(result['y_pred'])
start = 0
axarr[i, 0].set_xlim(xmin=start, xmax=end)
axarr[i, 0].plot(range(len(result['y_pred'])), result['y_pred'],
label="prediction")
axarr[i, 0].set_ylim(ymin=0.0)
#axarr[i, 0].plot(range(len(result['y_true'])), result[
# 'y_true'], alpha=0.9, label="truth", linestyle=':')
wrong = np.argwhere(np.not_equal(result['y_true'], result[
'y_pred']))
axarr[i, 0].plot(wrong, result['y_true'][wrong], '.',
label="error")
acc = result['acc']
#axarr[i, 0].set_title(f"{model.name} ({cfg.name}) - "
axarr[i, 0].set_title(f"{model.name} [ACC: {acc:.2f}%]",
fontsize=10)
# f"[{acc:.2f}%]", fontsize=10)
if 'attention' in result.keys():
ax2 = axarr[i, 0].twinx()
# same x-axis
color = 'tab:green'
ax2.set_ylabel('attention', color=color, fontsize=10)
attention = result['attention']
ax2.plot(range(len(attention)), attention, color=color)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_ylim(0.0, 1)
if 'drop' in result.keys():
dropped = np.argwhere(result['drop'])
for d in dropped:
axarr[i, 0].axvspan(d-0.5, d+0.5, alpha=0.2, color='red')
axarr[i, 0].legend(loc='upper center', bbox_to_anchor=(0.5, -0.15),
fancybox=True, shadow=True, ncol=5, fontsize=12)
axarr[i, 0].set_xlabel("epoch", fontsize=10)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
def table(self, folded=False):
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
column.append(result['acc'])
table.append(column)
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
xticks = [m.name + '-' + r.name for m in self.models for r in m.configs]
if folded:
table_plot_folded_(table, subjects, xticks)
else:
table_plot_(table, subjects, xticks)
try:
plt.savefig("cv_plots/cv_tab_" + model.name + ".eps", dpi=300, transparent=True, bbox_inches="tight")
except:
print("Failed saving plot.")
def att_subject_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
column = []
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
column.append(np.mean(result['attention']))
if column != []:
table.append(column)
att_models.append(model.name + f"({config.name})")
table = np.vstack(table).T
subjects = [get_basename_(p) for p in run.subjects]
#xticks = [m.name + '-' + r.name for m in self.models for r in
# m.configs]
table_plot_(table, subjects, att_models)
def att_table(self):
att_models = []
table = []
for i, model in enumerate(self.models):
for config in model.configs:
print(model.name)
column = [[],[],[],[],[]]
run = config.runs[0]
for path in run.subjects:
result = self.read_subject_file(path)
if not 'attention' in result.keys():
continue
att_per_label = zip(result['y_pred'], result['attention'])
assert(not np.isnan(np.min(result['attention'])))
for label, a in att_per_label:
column[label].append(a)
if column != [[],[],[],[],[]]:
column = [np.mean(np.array(av)) if av != [] else 0 for av
in column]
table.append(column)
att_models.append(model.name)
table = np.vstack(table)
table_plot_(table, att_models, ['W', 'N1', "N2", "N3", "REM"],
agg_table=False)
def extract_experts(self):
def get_acc(prediction, truth):
wrong = np.argwhere(np.not_equal(truth, prediction))
acc = 100 * (1 - (len(wrong) / len(truth)))
return acc
for i, model in enumerate(self.models):
configs = []
true_label_dict = None
for config in model.configs:
experts = None
soft_votes_dict = defaultdict(lambda : [])
hard_votes_dict = defaultdict(lambda : [])
true_label_dict = {}
configs.append(config.name)
accs = np.array([])
if len(config.runs) == 0: continue
run = config.runs[0]
# print("run: ", run.name)
for path in run.subjects:
result = self.read_subject_file(path)
subject = get_basename_(path)
expert_base_path = os.path.join(self.path, os.path.basename(
config.path))
if experts is None:
experts = result['expert_channels']
for expert in experts:
os.makedirs(
os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert))
voting_models = ['SOFT-V', 'MAJ-V']
for new_model in voting_models:
path = os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path))
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
for new_model in voting_models:
os.makedirs(os.path.join(self.path, new_model, os.path.basename(
config.path), os.path.basename(
config.path)))
for i in range(result['y_experts'].shape[1]):
y_expert_prob = result['y_experts'][:, i, :]
y_expert_pred = np.argmax(y_expert_prob, 1)
expert = result['expert_channels'][i]
y_true = result['y_true']
true_label_dict[subject] = y_true
a = result['a'][:, i]
drop = None
if 'drop_channels' in result.keys():
drop = result['drop_channels'][:, i]
hard_votes_dict[subject].append(y_expert_pred)
soft_votes_dict[subject].append(y_expert_prob)
wrong = np.argwhere(np.not_equal(y_true, y_expert_pred))
acc = 100*(1-wrong.shape[0]/len(y_expert_pred))
savepath = os.path.join(self.path, 'Expert-' +
expert, os.path.basename(config.path), 'Expert-' +
expert, subject)
savedict = {'y_true': y_true, 'y_pred': y_expert_pred,
'acc': acc, 'attention': a}
if drop is not None:
savedict['drop'] = drop
np.savez(savepath, **savedict)
for subject, predictions in soft_votes_dict.items():
soft_votes = np.array(predictions)
soft_vote = np.mean(soft_votes, axis=0)
soft_vote = np.argmax(soft_vote, axis=1)
y_true = true_label_dict[subject]
savepath = os.path.join(self.path, 'SOFT-V', os.path.basename(
config.path), os.path.basename(
config.path), subject)
savedict = {'y_true': y_true, 'y_pred': soft_vote,
'acc': get_acc(soft_vote, y_true)}
np.savez(savepath, **savedict)
for subject, predictions in hard_votes_dict.items():
hard_votes = np.array(predictions)
from scipy.stats import mode
maj_vote = mode(hard_votes, axis=0)[0][0]
y_true = true_label_dict[subject]
savepath = os.path.join(self.path, 'MAJ-V', os.path.basename(
config.path), os.path.basename(
config.path), subject)
savedict = {'y_true': y_true, 'y_pred': maj_vote,
'acc': get_acc(maj_vote, y_true)}
np.savez(savepath, **savedict)
def read_subject_file(self, path):
file = np.load(path)
truth = file['truth'] if 'truth' in file.keys() else file[
'y_true']
pred = file['pred'] if 'pred' in file.keys() else file['y_pred']
t = file['acc']
#print(t)
#print(type(t))
#print(AverageMeter(t))
#print("avg: ", t.avg)
acc = float(t)
result = {'y_true': truth, 'y_pred': pred, 'acc': acc}
if 'probs' in file.keys():
result['probs'] = file['probs']
if 'y_probs' in file.keys():
result['probs'] = file['y_probs']
if 'expert_channels' in file.keys():
result['expert_channels'] = file['expert_channels']
if 'y_experts' in file.keys():
result['y_experts'] = file['y_experts']
if 'a' in file.keys():
result['a'] = file['a']
if 'attention' in file.keys():
result['attention'] = file['attention']
if 'drop_channels' in file.keys():
result['drop_channels'] = file['drop_channels']
if 'drop' in file.keys():
result['drop'] = file['drop']
return result
if __name__ == '__main__':
path = '/local/home/hlinus/Dev/SleepLearning/reports/results/Physionet18' \
'/DroppingChannels'
e = Evaluation(path)
#e.bar()
e.hypnogram()
#e.att_table()
#e.table()
#e.extract_experts()
#e.att_table()
```
#### File: lib/loaders/baseloader.py
```python
from abc import ABC
from typing import Tuple
import numpy as np
from scipy import signal
class BaseLoader(ABC):
"""Base class which contains the data related to a single day/night of of a
single subject. There is a classmethod for every support file format which
can be used to read in the file and store it as sleeplearning object. To
support a new file format / dataset, a new class method has to be created.
Attributes
----------
psgs : iterable of dictionaries
Uniform raw data for various input formats and taken from one subject.
It is in an iterator over dictionaries, where dictionary key are various
descriptors of individual polysomnographic (PSG) records.
The dictionary contains:
* TODO
spectograms_: dictionary
"""
sleep_stages_labels = {0: 'WAKE', 1: "N1", 2: 'N2', 3: 'N3', 4: 'N4',
5: 'REM', 6: 'Artifact'}
def __init__(self, path: str, epoch_length: int, verbose = False):
self.path = path
self.label = None
self.psgs = None
self.spectograms_ = {}
self.hypnogram = None
self.sampling_rate_ = None #sampling_rate
self.epoch_length = epoch_length #epoch_size
self.window = None #self.sampling_rate_ * 2
self.stride = None #self.sampling_rate_
self.verbose = verbose
def get_spectrograms(self, channel: str, window: int, stride: int) -> Tuple[
np.ndarray, np.ndarray, np.ndarray]:
"""
        Compute the spectrogram for a specific channel and for every epoch and
return a tuple of (frequencies,times,[spectrograms]) where spectrograms
is a numpy array containing a spectrogram for every epoch
:param channel: channel key as stored in self.psgs
:param window: window size of FFT
:param stride: for overlapping windows
:return: frequencies [fs/2+1], times [epoch size*fs/stride],
spectrogram (magnitudes) [total epochs, fs/2+1, epoch size*fs/stride ]
"""
if channel in self.spectograms_:
return self.spectograms_[channel]
f = t = 0
Sxxs = []
# reshape to [num epochs, samples per epoch]
psgs = self.psgs[channel].reshape(
(-1, self.sampling_rate_ * self.epoch_length))
padding = window // 2 - stride // 2
psgs = np.pad(psgs, pad_width=((0, 0), (padding, padding)), mode='edge')
for psg in psgs:
psg_clean = psg
f, t, Sxx = signal.spectrogram(psg_clean, fs=self.sampling_rate_,
nperseg=window,
noverlap=window - stride,
scaling='density', mode='magnitude')
Sxxs.append(Sxx)
self.spectograms_[channel] = (f, t, np.array(Sxxs))
return self.spectograms_[channel]
def get_psds(self, channel: str, window: int, stride: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the power spectral densities for a specific channel and for
every epoch.
:param channel: channel key as stored in self.psgs
:return: frequencies [fs/2+1], psds [numEpochs, fs/2+1]
"""
pxxs = []
# reshape to [num epochs, samples per epoch]
psgs = self.psgs[channel].reshape(
(-1, self.sampling_rate_ * self.epoch_length))
padding = window // 2 - stride // 2
psgs = np.pad(psgs, pad_width=((0, 0), (padding, padding)), mode='edge')
f = 0
for psg in psgs:
f, pxx = signal.welch(psg, fs=self.sampling_rate_,
nperseg=window,
noverlap=window - stride,
scaling='density')
pxxs.append(pxx)
return f, np.array(pxxs)
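# Minimal sketch of the attributes a concrete loader is expected to provide
# before get_spectrograms/get_psds can be called; all values here are synthetic.
if __name__ == '__main__':
    class _ToyLoader(BaseLoader):
        def __init__(self):
            super().__init__(path='toy', epoch_length=20)
            self.sampling_rate_ = 100
            # ten 20-second epochs of white noise for a single channel
            self.psgs = {'EEG': np.random.randn(10 * 20 * 100)}

    loader = _ToyLoader()
    f, t, sxx = loader.get_spectrograms('EEG', window=200, stride=100)
    print(sxx.shape)  # (10 epochs, window//2 + 1 frequencies, time bins)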
```
#### File: lib/loaders/carofile.py
```python
import numpy as np
import scipy.io
from scipy import signal
from typing import Tuple
from sleeplearning.lib.loaders.baseloader import BaseLoader
class Carofile(BaseLoader):
def __init__(self, path: str, epoch_length: int = 20, verbose: bool = False):
super().__init__(path, epoch_length)
psg_dict = {'EEG_raw': 'FpzA2', 'EEG': 'FpzA2_filt', 'EOGR': 'EOGR_filt',
'EOGL': 'EOGL_filt', 'EMG': 'EMG_filt', 'EOGR_raw': 'EOGR',
'EOGL_raw': 'EOGL', 'EMG_raw': 'EMG'}
self.label = self.path.split('/')[-1][5:-12]
self.psgs = {}
mat = scipy.io.loadmat(self.path)
self.sampling_rate_ = int(mat['sampling_rate'][0][0])
epoch_scoring_length = int(mat['epoch_size_scoring_sec'][0][0])
if epoch_scoring_length % self.epoch_length != 0:
raise ValueError(
"epoch length ({0}s) must divide scoring length ({1}s)".format(
str(self.epoch_length), str(epoch_scoring_length)))
experts_present = []
for expert in ["E1","E2","E3","E4","E5","CL"]:
if f'sleepStage_score_{expert}' in mat:
experts_present.append(expert)
self.artefact_data = {'artefacts': [], 'epoch_size': 4}
hypnograms = []
self.hypnogram = np.zeros_like(mat[f'sleepStage_score_{experts_present[0]}'][0])
for i_e, expert in enumerate(experts_present):
if i_e == 0:
self.artefact_data['artefacts'] = mat[f'artfact_per4s_{expert}'][0]
else:
self.artefact_data['artefacts'] = [max(self.artefact_data['artefacts'][i_v], val) for i_v, val in enumerate(mat[f'artfact_per4s_{expert}'][0])]
hypnograms.append(mat[f'sleepStage_score_{expert}'][0])
for h in range(len(self.hypnogram)):
self.hypnogram[h] = np.bincount([hypnograms[h_id][h] for h_id in range(len(experts_present))]).argmax()
if self.hypnogram is None:
print(mat.keys())
if 4 in self.hypnogram:
print(path)
raise Exception("Invalid value in sleep labels")
else:
epoch_scoring_length = self.epoch_length
for k, v in psg_dict.items():
num_samples = mat[v].shape[1]
samples_wo_label = num_samples % (
epoch_scoring_length * self.sampling_rate_)
if verbose: print(k + ": cutting ",
samples_wo_label / self.sampling_rate_,
"seconds at the end")
psg_cut = mat[v][0][:-samples_wo_label]
self.psgs[k] = psg_cut
def get_psds(self, channel: str, window: int, stride: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the power spectral densities for a specific channel and for
every epoch excluding artefacts.
:param channel: channel key as stored in self.psgs
:return: frequencies [fs/2+1], psds [numEpochs, fs/2+1]
"""
pxxs = []
artefacts = np.repeat(self.artefact_data['artefacts'],
self.artefact_data[
'epoch_size'] * self.sampling_rate_)
# reshape to [num epochs, samples per epoch]
psgs = self.psgs[channel].reshape(
(-1, self.sampling_rate_ * self.epoch_length))
artefacts = artefacts.reshape(
(-1, self.sampling_rate_ * self.epoch_length))
padding = window // 2 - stride // 2
psgs = np.pad(psgs, pad_width=((0, 0), (padding, padding)), mode='edge')
artefacts = np.pad(artefacts, pad_width=((0, 0), (padding, padding)),
mode='edge')
f = 0
for psg, artefact in zip(psgs, artefacts):
psg_clean = psg[artefact == 0]
f, pxx = signal.welch(psg_clean, fs=self.sampling_rate_,
nperseg=window,
noverlap=window - stride,
scaling='density')
pxxs.append(pxx)
return f, np.array(pxxs)
```
#### File: lib/models/multivariate_net.py
```python
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import ModuleList
from torch.nn.init import xavier_normal
from sleeplearning.lib.models.deep_sleep_net import DeepFeatureNet_, Conv1dWithBn
class MultivariateNet(nn.Module):
def __init__(self, ts: dict):
super(MultivariateNet, self).__init__()
self.dropout = ts['dropout']
input_shape = ts['input_dim']
conv_per_channel = nn.Sequential(
Conv1dWithBn(1, filter_size=50, n_filters=64, stride=6),
nn.MaxPool1d(4, stride=4),
nn.Dropout(p=self.dropout),
Conv1dWithBn(64, filter_size=8,
n_filters=128, stride=1),
nn.MaxPool1d(4, stride=4),
nn.Dropout(p=self.dropout),
Conv1dWithBn(128, filter_size=8,
n_filters=128, stride=1),
nn.MaxPool1d(4, stride=4),
nn.Dropout(p=self.dropout),
Conv1dWithBn(128, filter_size=8,
n_filters=128, stride=1),
)
self.num_classes = ts['nclasses']
self.dfns = ModuleList([conv_per_channel for _ in range(input_shape[0])])
output_dim = self._get_output_dim((1, input_shape[1]))
self.adaptive_maxp = nn.AdaptiveMaxPool1d(4096)
self.fcn = nn.ModuleList()
for k in range(len(ts['fc_d'])-1):
in_dim = ts['fc_d'][k][0]
out_dim = ts['fc_d'][k+1][0]
dropout = ts['fc_d'][k][1]
self.fcn.append(nn.Linear(in_dim, out_dim,
bias=False))
self.fcn.append(nn.ReLU())
self.fcn.append(nn.Dropout(p=dropout))
self.fcn.append(nn.Linear(ts['fc_d'][-1][0], self.num_classes,
bias=False))
self.fcn.append(nn.ReLU())
self.fcn.append(nn.Dropout(p=ts['fc_d'][-1][1]))
self.weights_init()
# generate input sample and forward to get shape
def _get_output_dim(self, shape):
bs = 1
input = torch.rand(bs, *shape)
output_feat = self.dfns[0](input)
n_size = output_feat.data.view(bs, -1).size(1)
return n_size
def weights_init(m):
for _, mi in m._modules.items():
            if isinstance(mi, nn.Conv2d) or isinstance(mi, nn.Linear):
xavier_normal(mi.weight.data)
if mi.bias is not None:
xavier_normal(mi.bias.data)
def forward(self, x):
        x = F.dropout(x, p=self.dropout, training=self.training)
x = [dfn(torch.unsqueeze(channel, 1)) for (dfn, channel) in zip(self.dfns, torch.unbind(x, 1))]
x = [y.view(y.size(0), 1, -1) for y in x]
x = torch.cat(x, 2)
x = self.adaptive_maxp(x)
x = x.view(x.size(0), -1)
        x = F.dropout(x, p=.5, training=self.training)
for i in range(len(self.fcn)):
x = self.fcn[i](x)
return x
``` |
{
"source": "a12590/MedicalWeb",
"score": 3
} |
#### File: a12590/MedicalWeb/Medcontent.py
```python
import urllib2
import pymysql.cursors
import requests
from bs4 import BeautifulSoup
import bs4
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class Medical(object):
def __init__(self, *args, **kwargs):
self.conn = pymysql.connect(host='localhost', port=3306, user='root', password='', db='douban', charset='utf8')
self.cursor = self.conn.cursor()
self.sql_info = "INSERT IGNORE INTO `douban_mov` VALUES(%s,%s,%s,%s,%s,%s)"
    @staticmethod
    def getHTMLText(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return ""
    @staticmethod
    def fillUnivList(ulist, html):
soup = BeautifulSoup(html, "html.parser")
for tr in soup.find('tbody').children:
if isinstance(tr, bs4.element.Tag):
tds = tr('td')
ulist.append([tds[0].string, tds[1].string, tds[3].string])
    @staticmethod
    def printUnivList(ulist, num):
tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"
        # print(tplt.format("Rank", "University", "Total score", chr(12288)))
        print("{:^10}\t{:^6}\t{:^10}".format("Rank", "University", "Total score"))
for i in range(num):
u = ulist[i]
# print(tplt.format(u[0],u[1],u[2],chr(12288)))
print("{:^10}\t{:^6}\t{:^10}".format(u[0], u[1], u[2]))
``` |
{
"source": "A125X/Class-of-Neural-Networks",
"score": 3
} |
#### File: Class-of-Neural-Networks/Tests/NeuralNetworksTests.py
```python
import pytest
import numpy as np
from Constructor import NeuralNetworksConstructor as nnc
from sklearn import datasets, preprocessing
import random
#to check whether the current model is working at all,
#based on the toy datasets from sklearn
def test_iris_classification():
#obtaining dataset
classes = 3
iris = datasets.load_iris()
arr_out = np.eye(classes, dtype=int)
dataset = [
(iris.data[i][None, ...],
arr_out[iris.target[i]])
for i in range(len(iris.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [4, 10, 3]
alpha = 0.00003
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 1000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'tanh', 'classification')
model.train(alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_numbers_classification():
#obtaining dataset
classes = 10
digits = datasets.load_digits()
arr_out = np.eye(classes, dtype=int)
dataset = [
(digits.data[i][None, ...],
arr_out[digits.target[i]])
for i in range(len(digits.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [64, 32, 16, 10]
alpha = 0.00003
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 200
#creating and training the model
model = nnc.NeuralNetwork(layers, 'tanh', 'classification')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_diabetes_regression():
#obtaining dataset
diabetes = datasets.load_diabetes()
#standartization
diabetes.data = preprocessing.normalize(diabetes.data, axis=0)
dataset = [
(diabetes.data[i][None, ...],
diabetes.target[i]) for i in range(len(diabetes.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [10, 10, 1]
alpha = 0.0000035
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 2000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'regression')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_boston_regression():
#obtaining dataset
boston = datasets.load_boston()
#standartization
boston.data = preprocessing.normalize(boston.data, axis=0)
dataset = [
(boston.data[i][None, ...],
boston.target[i]) for i in range(len(boston.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [13, 10, 1]
alpha = 0.000007
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 5000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'regression')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_linnerud_regression():
#obtaining dataset
linnerud = datasets.load_linnerud()
    #normalization (preprocessing.normalize with axis=0 scales each feature column to unit norm)
linnerud.data = preprocessing.normalize(linnerud.data, axis=0)
dataset = [
(linnerud.data[i][None, ...],
linnerud.target[i]) for i in range(len(linnerud.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 80 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [3, 10, 3]
alpha = 0.000007
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 5000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'regression')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(len(test_dataset)):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_wine_classification():
#obtaining dataset
classes = 3
wine = datasets.load_wine()
arr_out = np.eye(classes, dtype=int)
dataset = [
(wine.data[i][None, ...],
arr_out[wine.target[i]])
for i in range(len(wine.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 80 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [13, 30, 30, 15, 7, 3]
alpha = 0.000002
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 4000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'classification')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
def test_breast_cancer_classification():
#obtaining dataset
classes = 2
breast_cancer = datasets.load_breast_cancer()
arr_out = np.eye(classes, dtype=int)
dataset = [
(breast_cancer.data[i][None, ...],
arr_out[breast_cancer.target[i]])
for i in range(len(breast_cancer.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [30, 15, 10, 2]
alpha = 0.000002
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 1000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'classification')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
for i in range(10):
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
for i in range(3):
print()
#saving & reading weights from a txt file
def test_print_and_read_weights_iris():
#obtaining dataset
classes = 3
iris = datasets.load_iris()
arr_out = np.eye(classes, dtype=int)
dataset = [
(iris.data[i][None, ...], arr_out[iris.target[i]])
for i in range(len(iris.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [4, 10, 3]
alpha = 0.00003
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 1000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'tanh', 'classification')
model.train(alpha, trainings, epochs, batch_size, dataset_training, test_dataset)
    model.print_weights('Weights-Data/output.txt')  #forward slash avoids the invalid '\o' escape sequence
#testing our model using test dataset
model.show_error()
print('Trained model accuracy:', model.accuracy(test_dataset))
#creating the second model and obtaining weights for it
model2 = nnc.NeuralNetwork(layers, 'tanh', 'classification')
    model2.read_weights('Weights-Data/output.txt')
#testing our model using test dataset
print('Copied model accuracy:', model2.accuracy(dataset))
#saving weights with the best results on the test dataset
def test_best_weights():
#obtaining dataset
classes = 2
breast_cancer = datasets.load_breast_cancer()
arr_out = np.eye(classes, dtype=int)
dataset = [
(breast_cancer.data[i][None, ...],
arr_out[breast_cancer.target[i]])
for i in range(len(breast_cancer.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [30, 15, 10, 2]
alpha = 0.000002
batch_size = len(dataset_training)
trainings = len(dataset_training) // batch_size
epochs = 1000
#creating and training the model
model = nnc.NeuralNetwork(layers, 'relu', 'classification')
for i in range(1):
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our latest model using test dataset
print('Latest model accuracy: ',
model.accuracy(test_dataset))
#testing our best model using test dataset
    print('Best-weights model accuracy on the test data: ',
model.accuracy(test_dataset, 'best'))
#print(model.accuracy(dataset) != model.accuracy(dataset, 'best'))
print(model.best_weights == model.weights)
model.show_error()
#tests with different batch sizes, exercising:
#__batch_from_data__(self, dataset, training_counter, batch_size)
#based on the digits classification toy dataset from sklearn
def test_batch_sizes():
#obtaining dataset
classes = 10
digits = datasets.load_digits()
arr_out = np.eye(classes, dtype=int)
dataset = [
(digits.data[i][None, ...],
arr_out[digits.target[i]])
for i in range(len(digits.target))]
random.shuffle(dataset)
#using part of the original data to train our neural network
dataset_trainings_len = len(dataset) * 90 // 100
dataset_training = []
for i in range(dataset_trainings_len):
dataset_training.append(dataset[i])
#and other part to test our network
test_dataset = []
for i in range(dataset_trainings_len, len(dataset)):
test_dataset.append(dataset[i])
#providing some hyperparameters to the network
layers = [64, 32, 16, 10]
alpha = 0.00003
for i in range(1, 5):
batch_size = i * 64
trainings = len(dataset_training) // batch_size
epochs = 100 // trainings
#creating and training the model
model = nnc.NeuralNetwork(layers, 'tanh', 'classification')
model.train(
alpha,
trainings,
epochs,
batch_size,
dataset_training,
test_dataset)
#testing our model using test dataset
model.show_determined_test(dataset_i = test_dataset[i])
print()
model.show_error()
#
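#illustrative entry point (added as a sketch): any of the test functions above can be run directly
if __name__ == '__main__':
    test_numbers_classification()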
``` |
{
"source": "a1270/iqdb_tagger",
"score": 2
} |
#### File: iqdb_tagger/iqdb_tagger/models.py
```python
import datetime
import logging
import os
from difflib import Differ
from urllib.parse import urljoin, urlparse
from typing import Any, Dict, List, Optional, Tuple, TypeVar
import cfscrape
import mechanicalsoup
import requests
import structlog
from bs4 import BeautifulSoup, element
from peewee import (
BooleanField,
CharField,
DateTimeField,
ForeignKeyField,
IntegerField,
Model,
SqliteDatabase,
TextField
)
from PIL import Image
from .custom_parser import get_tags as get_tags_from_parser
from .sha256 import sha256_checksum
from .utils import default_db_path, thumb_folder as default_thumb_folder
DEFAULT_SIZE = 150, 150
db = SqliteDatabase(None)
log = structlog.getLogger()
class BaseModel(Model):
"""base model."""
class Meta:
"""meta."""
database = db
class Program(BaseModel):
"""program model."""
version = IntegerField()
class Tag(BaseModel):
"""Tag model."""
name = CharField()
namespace = CharField(null=True)
@property
def full_name(self):
"""Get full name."""
if self.namespace:
return self.namespace + ':' + self.name
return self.name
class Match(BaseModel):
"""Match model."""
RATING_UNKNOWN = 0
RATING_SAFE = 1
RATING_ERO = 2
RATING_EXPLICIT = 3
RATING_CHOICES = (
(RATING_UNKNOWN, 'Unknown'),
(RATING_SAFE, 'Safe'),
(RATING_ERO, 'Ero'),
(RATING_EXPLICIT, 'Explicit'),
)
href = CharField(unique=True)
thumb = CharField()
rating = CharField()
img_alt = TextField(null=True)
width = IntegerField(null=True)
height = IntegerField(null=True)
@property
def iqdb_thumb(self):
"""Get iqdb thumb url."""
return urljoin('https://iqdb.org', self.thumb)
@property
def size(self):
"""Get size string."""
if self.width and self.height:
return '{}x{}'.format(self.width, self.height)
@property
def link(self):
"""Get href link."""
return urljoin('https://', self.href)
@property
def link_netloc(self):
"""Get readable netloc."""
netloc = urlparse(self.link).netloc
if netloc.startswith('www.'):
netloc = netloc.split('www.', 1)[1]
endings = ['.net', '.com', '.us']
for ending in endings:
if netloc.endswith(ending):
netloc = netloc.split(ending, 1)[0]
return netloc
@property
def tags_from_img_alt(self):
"""Get readable tag from image alt."""
result = []
img_alt = self.img_alt[0]
non_tags_txt = img_alt.split('Tags:')[0]
tags_txt = img_alt.split('Tags:')[1]
result.extend(tags_txt.split(' '))
non_tags_txt.split('Score:')
result.append(non_tags_txt.split('Score:')[0])
result.append('Score:' + non_tags_txt.split('Score:')[1])
result = [x.strip() for x in result if x]
return result
class MatchTagRelationship(BaseModel):
"""match tag relationship."""
match = ForeignKeyField(Match)
tag = ForeignKeyField(Tag)
IM = TypeVar('IM', bound='ImageModel')
class ImageModel(BaseModel):
"""Image model."""
checksum = CharField(unique=True)
width = IntegerField()
height = IntegerField()
path = CharField(null=True)
@property
def size(self):
"""Get size string."""
return '{}x{}'.format(self.width, self.height)
@property
def path_basename(self):
"""Get path basename."""
return os.path.basename(self.path)
@staticmethod
def get_or_create_from_path(img_path: str) -> Tuple[IM, bool]:
"""Get or crate from path."""
checksum = sha256_checksum(img_path)
img = Image.open(img_path)
width, height = img.size
img, created = ImageModel.get_or_create(
checksum=checksum, defaults={
'width': width, 'height': height, 'path': img_path,
}
)
return img, created
def __str__(self):
"""Get string repr."""
return '{}, checksum:{}..., size:{}x{} path:{}'.format(
super().__str__(), self.checksum[:5],
self.width, self.height, self.path
)
class ImageMatchRelationship(BaseModel):
"""Image and match result."""
image = ForeignKeyField(ImageModel)
match_result = ForeignKeyField(Match) # NOQA
class ImageMatch(BaseModel):
"""Image match."""
STATUS_UNKNOWN = 0
STATUS_BEST_MATCH = 1
STATUS_POSSIBLE_MATCH = 2
STATUS_OTHER = 3
STATUS_CHOICES = (
(STATUS_UNKNOWN, 'Unknown'),
(STATUS_BEST_MATCH, 'Best match'),
(STATUS_POSSIBLE_MATCH, 'Possible match'),
(STATUS_OTHER, 'Other'),
)
SP_IQDB = 0
SP_DANBOORU = 1
SP_E621 = 2
SP_ANIME_PICTURES = 3
SP_E_SHUUSHUU = 4
SP_GELBOORU = 5
SP_KONACHAN = 6
SP_SANKAKU = 7
SP_THEANIMEGALLERY = 8
SP_YANDERE = 9
SP_ZEROCHAN = 10
SP_CHOICES = (
(SP_IQDB, 'iqdb'),
(SP_DANBOORU, 'danbooru'),
(SP_E621, 'e621'),
(SP_ANIME_PICTURES, 'anime_pictures'),
(SP_E_SHUUSHUU, 'e_shuushuu'),
(SP_GELBOORU, 'gelbooru'),
(SP_KONACHAN, 'konachan'),
(SP_SANKAKU, 'sankaku'),
(SP_THEANIMEGALLERY, 'theanimegallery'),
(SP_YANDERE, 'yandere'),
(SP_ZEROCHAN, 'zerochan'),
)
match = ForeignKeyField(ImageMatchRelationship)
similarity = IntegerField()
status = IntegerField(choices=STATUS_CHOICES)
search_place = IntegerField(choices=SP_CHOICES)
created_date = DateTimeField(default=datetime.datetime.now)
force_gray = BooleanField(default=False)
@staticmethod
def parse_table(table):
"""Parse table."""
header_tag = table.select_one('th')
status = ImageMatch.STATUS_OTHER
if hasattr(header_tag, 'text'):
header_text = header_tag.text
best_match_text = ('Best match', 'Additional match', 'Probable match:')
if header_text in ('Your image', 'No relevant matches'):
status = None
elif header_text == 'Possible match':
status = ImageMatch.STATUS_POSSIBLE_MATCH
elif header_text in best_match_text:
status = ImageMatch.STATUS_BEST_MATCH
elif header_text == 'Improbable match:':
status = ImageMatch.STATUS_OTHER
else:
log.debug('header text', v=header_text)
if status is None:
return {}
td_tags = table.select('td')
assert '% similarity' in td_tags[-1].text, "similarity was not found in " + header_tag.text
size_and_rating_text = td_tags[-2].text
rating = Match.RATING_UNKNOWN
for item in Match.RATING_CHOICES:
if '[{}]'.format(item[1]) in size_and_rating_text:
rating = item[0]
size = size_and_rating_text.strip().split(' ', 1)[0].split('×')
if len(size) == 1 and '×' not in size_and_rating_text:
size = (None, None)
else:
size = (int(size[0]), int(size[1]))
img_tag = table.select_one('img')
img_alt = img_tag.attrs.get('alt')
img_title = img_tag.attrs.get('title')
if img_alt == '[IMG]' and img_title is None:
img_alt = None
if img_alt != img_title:
d = Differ()
diff_text = '\n'.join(d.compare(img_alt, img_title))
log.warning(
'title and alt attribute of img tag is different.\n{}'.format(
diff_text
)
)
return {
# match
'status': status,
'similarity': td_tags[-1].text.split('% similarity', 1)[0],
# match result
'href': table.select_one('a').attrs.get('href', None),
'thumb': table.select_one('img').attrs.get('src', None),
'rating': rating,
'size': size,
'img_alt': img_alt,
}
@staticmethod
def parse_page(page):
"""Parse page."""
if isinstance(page, str):
page = BeautifulSoup(page, 'lxml')
elif not isinstance(page, BeautifulSoup):
if not os.path.isfile(page):
raise ValueError('File not Exist: {}'.format(page))
with open(page) as f:
soup = BeautifulSoup(f.read(), 'lxml')
page = soup
# parse table
tables = page.select('.pages table')
for table in tables:
res = ImageMatch.parse_table(table)
if not res:
continue
additional_res = get_additional_result_from_table(table, res)
if additional_res:
yield additional_res
yield res
@staticmethod
def get_or_create_from_page(page, image, place=None, force_gray=False):
"""Get or create from page result."""
if place is None:
place = ImageMatch.SP_IQDB
items = ImageMatch.parse_page(page)
for item in items:
match_result, _ = Match.get_or_create(
href=item['href'], defaults={
'thumb': item['thumb'],
'rating': item['rating'],
'img_alt': item['img_alt'],
'width': item['size'][0],
'height': item['size'][1],
}
)
imr, _ = ImageMatchRelationship.get_or_create(
image=image,
match_result=match_result,
)
yield ImageMatch.get_or_create(
match=imr,
search_place=place,
force_gray=force_gray,
defaults={
'status': item['status'],
'similarity': item['similarity'],
}
)
@property
def status_verbose(self):
"""Get verbose status."""
return dict(ImageMatch.STATUS_CHOICES)[self.status]
@property
def search_place_verbose(self):
"""Get verbose search place."""
return dict(ImageMatch.SP_CHOICES)[self.search_place]
def get_additional_result_from_table(table: element.Tag, last_result: Dict[str, Any]) -> Dict[str, Any]:
"""Get additional result from html table."""
a_tags = table.select('a')
assert len(a_tags) < 3, "Unexpected html received at parse_page. Malformed link"
additional_res = {} # type: Dict[str, Any]
if len(a_tags) == 2:
additional_res = last_result
additional_res['href'] = a_tags[1].attrs.get('href', None)
return additional_res
iqdb_url_dict = {
'iqdb': ('http://iqdb.org', ImageMatch.SP_IQDB),
'danbooru': ('http://danbooru.iqdb.org', ImageMatch.SP_DANBOORU),
'e621': ('http://iqdb.harry.lu', ImageMatch.SP_E621),
'anime_pictures': ('https://anime-pictures.iqdb.org', ImageMatch.SP_ANIME_PICTURES),
'e_shuushuu': ('https://e-shuushuu.iqdb.org', ImageMatch.SP_E_SHUUSHUU),
'gelbooru': ('https://gelbooru.iqdb.org', ImageMatch.SP_GELBOORU),
'konachan': ('https://konachan.iqdb.org', ImageMatch.SP_KONACHAN),
'sankaku': ('https://sankaku.iqdb.org', ImageMatch.SP_SANKAKU),
'theanimegallery': ('https://theanimegallery.iqdb.org', ImageMatch.SP_THEANIMEGALLERY),
'yandere': ('https://yandere.iqdb.org', ImageMatch.SP_YANDERE),
'zerochan': ('https://zerochan.iqdb.org', ImageMatch.SP_ZEROCHAN),
}
class ThumbnailRelationship(BaseModel):
"""Thumbnail tag relationship."""
original = ForeignKeyField(ImageModel, related_name='thumbnails') # NOQA
thumbnail = ForeignKeyField(ImageModel)
@staticmethod
def get_or_create_from_image(
image: ImageModel, size: Tuple[int, int],
thumb_folder: str = None, thumb_path: str = None, img_path: str = None):
"""Get or create from image."""
thumbnails = [
x for x in image.thumbnails
if x.thumbnail.width == size[0] and x.thumbnail.height == size[1]
]
if thumbnails:
assert len(thumbnails) == 1, "There was not one thumbnail for the result"
return thumbnails[0], False
if thumb_path is None:
thumb_path = '{}-{}-{}.jpg'.format(image.checksum, size[0], size[1])
if thumb_folder:
thumb_path = os.path.join(thumb_folder, thumb_path)
if not os.path.isfile(thumb_path) or os.path.getsize(thumb_path) == 0:
im = Image.open(image.path) if img_path is None else Image.open(img_path)
            im.thumbnail(size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
try:
im.save(thumb_path, 'JPEG')
except OSError as e:
valid_err = [
'cannot write mode RGBA as JPEG',
'cannot write mode P as JPEG',
'cannot write mode LA as JPEG',
]
err_str = str(e)
if err_str in valid_err:
log.debug('Converting to JPEG for error fix', err=err_str)
im = im.convert('RGB')
im.save(thumb_path, 'JPEG')
else:
raise e
thumb = ImageModel.get_or_create_from_path(thumb_path)[0] # type: ImageModel
return ThumbnailRelationship.get_or_create(
original=image, thumbnail=thumb)
def init_db(db_path=None, version=1):
"""Init db."""
if db_path is None:
db_path = default_db_path
db.init(db_path)
if not os.path.isfile(db_path):
model_list = [
ImageMatch,
ImageMatchRelationship,
ImageModel,
Match,
MatchTagRelationship,
Program,
Tag,
ThumbnailRelationship
]
db.create_tables(model_list)
version = Program(version=version)
version.save()
else:
logging.debug('db already existed.')
def get_posted_image(
img_path: str,
resize: Optional[bool] = False, size: Optional[Tuple[int, int]] = None,
output_thumb_folder: Optional[str] = default_thumb_folder,
thumb_path: Optional[str] = None) -> ImageModel:
"""Get posted image."""
img = ImageModel.get_or_create_from_path(img_path)[0] # type: ImageModel
def_thumb_rel, _ = ThumbnailRelationship.get_or_create_from_image(
image=img,
thumb_folder=output_thumb_folder,
size=DEFAULT_SIZE,
thumb_path=thumb_path,
img_path=img_path
)
resized_thumb_rel = None
if resize and size:
resized_thumb_rel, _ = \
ThumbnailRelationship.get_or_create_from_image(
image=img,
thumb_folder=output_thumb_folder,
size=size,
img_path=img_path
)
elif resize:
# use thumbnail if no size is given
resized_thumb_rel = def_thumb_rel
else:
# no resize, return actual image
return img
return resized_thumb_rel.thumbnail \
if resized_thumb_rel is not None else img
def get_page_result(
image: str,
url: str,
browser: Optional[mechanicalsoup.StatefulBrowser] = None,
use_requests: Optional[bool] = False):
"""Get iqdb page result.
Args:
image: image path to be uploaded.
url: iqdb url
browser: browser instance
use_requests: use requests package instead from browser
Returns:
HTML page from the result.
"""
if use_requests:
files = {'file': open(image, 'rb')}
resp = requests.post(url, files=files, timeout=10)
return resp.text
browser = mechanicalsoup.StatefulBrowser(soup_config={'features': 'lxml'})
browser.raise_on_404 = True
browser.open(url)
html_form = browser.select_form('form')
html_form.input({'file': image})
browser.submit_selected()
# if ok, will output: <Response [200]>
return browser.get_current_page()
def get_tags_from_match_result(
match_result: Match,
browser: Optional[mechanicalsoup.StatefulBrowser] = None,
scraper: Optional[cfscrape.CloudflareScraper] = None
) -> List[Tag]:
"""Get tags from match result."""
filtered_hosts = ['anime-pictures.net', 'www.theanimegallery.com']
res = MatchTagRelationship.select() \
.where(MatchTagRelationship.match == match_result)
tags = [x.tag for x in res]
is_url_in_filtered_hosts = urlparse(match_result.link).netloc in \
filtered_hosts
if is_url_in_filtered_hosts:
log.debug('URL in filtered hosts, no tag fetched', url=match_result.link)
elif not tags:
try:
if browser is None:
browser = mechanicalsoup.StatefulBrowser(soup_config={'features': 'lxml'})
browser.raise_on_404 = True
browser.open(match_result.link, timeout=10)
page = browser.get_current_page()
new_tags = get_tags_from_parser(page, match_result.link, scraper)
new_tag_models = []
if new_tags:
for tag in new_tags:
namespace, tag_name = tag
tag_model = Tag.get_or_create(name=tag_name, namespace=namespace)[0] # type: Tag
MatchTagRelationship.get_or_create(match=match_result, tag=tag_model)
new_tag_models.append(tag_model)
else:
log.debug('No tags found.')
tags.extend(new_tag_models)
except (requests.exceptions.ConnectionError, mechanicalsoup.LinkNotFoundError) as e:
log.error(str(e), url=match_result.link)
return tags
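# Illustrative end-to-end flow (a sketch, not the package's real CLI): initialise the
# database, register the query image, fetch the iqdb result page and store the matches.
# The image path below is hypothetical; everything else is defined in this module.
if __name__ == '__main__':
    init_db()
    query_img = get_posted_image('/tmp/query.jpg')  # returns an ImageModel
    page = get_page_result(query_img.path, iqdb_url_dict['iqdb'][0], use_requests=True)
    for image_match, _created in ImageMatch.get_or_create_from_page(
            page, image=query_img, place=iqdb_url_dict['iqdb'][1]):
        print(image_match.similarity, image_match.status_verbose)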
``` |
{
"source": "a1291762/wallpaper_helper",
"score": 2
} |
#### File: a1291762/wallpaper_helper/ImageWindow.py
```python
import sys
try:
from PySide6.QtCore import *
from PySide6.QtGui import *
except Exception:
try:
from PySide2.QtCore import *
from PySide2.QtGui import *
except Exception:
from PySide.QtCore import *
from PySide.QtGui import *
from Ui_ImageWindow import *
import os
import shutil
import filecmp
FORWARDS = False
BACKWARDS = True
VIEW_ALL = object()
VIEW_UNUSED_ORIGINALS = object()
VIEW_CROPPED = object()
VIEW_UNCROPPED = object()
def forceExt(path, ext):
file = os.path.basename(path)
path = os.path.dirname(path)
return path + "/" + os.path.splitext(file)[0] + "." + ext
def forceJpeg(path):
return forceExt(path, "jpg")
class ImageWindow(QMainWindow):
image = None
ui = None
viewMode = VIEW_ALL
def __init__(self):
super().__init__()
self.ui = Ui_ImageWindow()
self.ui.setupUi(self)
# focus the label and listen for keyboard events from it
self.ui.label.setFocus()
self.ui.label.installEventFilter(self)
# set (or load from config) the desktop size
try:
desktop = QGuiApplication.primaryScreen().size()
except Exception:
desktop = QDesktopWidget()
settings = QSettings()
desktopWidth = int(settings.value("desktopWidth", desktop.width()))
desktopHeight = int(settings.value("desktopHeight", desktop.height()))
self.ui.deskWidth.setText("%d" % desktopWidth)
self.ui.deskHeight.setText("%d" % desktopHeight)
# react to size changes
self.ui.deskWidth.textChanged.connect(lambda: self._setDesktopFrame(True))
self.ui.deskHeight.textChanged.connect(lambda: self._setDesktopFrame(True))
self._setDesktopFrame(False)
# path buttons need to read/save config
self.ui.wallpaper.setSettingsKey("wallpaper")
self.ui.originals.setSettingsKey("originals")
# help button
self.ui.helpBtn.toggled.connect(self._toggleHelp)
self._toggleHelp(False)
# Load the initial image
file = settings.value("image")
self._loadInitialImage(file)
def _loadInitialImage(self, file):
if file:
# load previous image
try:
self._loadFile(file)
print("Loaded previous image")
return
except Exception as e:
print("Failed to load initial image "+file+" "+str(e))
pass # continue
path = self.ui.wallpaper.path
if path and os.path.exists(path) and os.path.isdir(path):
# load the first image from the path
self.imagePath = path + "/." # set imagePath to a file inside the wallpaper folder
self._selectNextImage(FORWARDS)
print("Loaded path image?")
def dragEnterEvent(self, e):
file = e.mimeData().urls()[0].toLocalFile().strip()
self.ui.label.setText(file)
e.accept()
def dragLeaveEvent(self, e):
self.ui.label.setText("Drop an image onto the window")
e.accept()
def dropEvent(self, e):
file = e.mimeData().urls()[0].toLocalFile().strip()
try:
self._loadFile(file)
        except Exception:
self.ui.label.setText("Drop an image onto the window")
e.accept()
def _setDesktopFrame(self, saveSettings):
# Used to draw the clip rect
self.ui.label.setDesktop(
int(self.ui.deskWidth.text()),
int(self.ui.deskHeight.text()))
if saveSettings:
settings = QSettings()
settings.setValue("desktopWidth", self.ui.deskWidth.text())
settings.setValue("desktopHeight", self.ui.deskHeight.text())
def _loadFile(self, file, force=False):
if self.viewMode != VIEW_ALL:
backupPath, wallpaperPath = self._getPaths(file)
wallpaperPath = forceJpeg(wallpaperPath)
if self.viewMode == VIEW_UNUSED_ORIGINALS:
if backupPath == file and not os.path.isfile(wallpaperPath):
pass # This is an unused original
elif not force:
raise Exception("Not an unused original")
elif self.viewMode == VIEW_CROPPED:
if file == wallpaperPath and \
os.path.isfile(backupPath) and \
not filecmp.cmp(file, backupPath):
pass
elif not force:
raise Exception("Not a cropped image")
elif self.viewMode == VIEW_UNCROPPED:
if file == wallpaperPath and \
os.path.isfile(backupPath) and \
filecmp.cmp(file, backupPath):
pass
elif not force:
raise Exception("Not an uncropped image")
# make sure the image is valid
image = QImage(file)
        assert not image.isNull()
self.ui.label.setImage(image)
self.imagePath = file # for forwards/backwards moving
settings = QSettings()
settings.setValue("image", file) # for close/reopen
title = file
try:
# Indicate if the original file is different to the wallpaper file
backupPath, wallpaperPath = self._getPaths()
if file != backupPath and \
os.path.isfile(backupPath) and \
not filecmp.cmp(file, backupPath):
title += "*"
except Exception as e:
print("Error checking if backup and wallpaper differ?! "+str(e))
self.setWindowTitle(title)
def eventFilter(self, object, e):
# I only want the key press events
if e.type() != QEvent.KeyPress:
return False
handled = True
modifiers = e.modifiers()
key = e.key()
if modifiers & Qt.ControlModifier:
if key == Qt.Key_S: self._useCroppedImage() # control + S = use cropped image
elif key == Qt.Key_R: self._useOriginalImage() # control + R = use original image
elif key == Qt.Key_A: self.ui.label.selectAll() # control + A = select all
else: handled = False
elif modifiers & Qt.ShiftModifier:
if key == Qt.Key_Right: self._moveFrame(1, 0) # Shift + Arrow = move frame (precise)
elif key == Qt.Key_Left: self._moveFrame(-1, 0)
elif key == Qt.Key_Up: self._moveFrame(0, -1)
elif key == Qt.Key_Down: self._moveFrame(0, 1)
elif key == Qt.Key_O: self._toggleUnusedOriginals() # Shift + O = toggle unused originals
elif key == Qt.Key_C: self._toggleCroppedImages() # Shift + C = toggle cropped images
else: handled = False
else:
if key == Qt.Key_Right: self._selectNextImage(FORWARDS) # Right = Next
elif key == Qt.Key_Left: self._selectNextImage(BACKWARDS) # Left = Prev
elif key == Qt.Key_Minus: self._addPadding(-1) # Plus/Minus = grow/shrink (precise)
elif key == Qt.Key_Plus: self._addPadding(1)
elif key == Qt.Key_Equal: self._addPadding(1)
elif key == Qt.Key_O: self._toggleOriginal() # O = toggle original
elif key == Qt.Key_Space: self._togglePreview() # Space = toggle preview
elif key == Qt.Key_Backspace: self._removeImage() # Do not use image
elif key == Qt.Key_B: self._toggleBackground() # Toggle background colour
else: handled = False
if handled:
e.accept()
return True
return False
def _selectNextImage(self, backwards):
path = os.path.dirname(self.imagePath)
files = self._getImages(path)
if len(files) == 0:
print("No files?!")
return
# Simply by reversing the list, we can use the same logic to move backwards
if backwards:
files.reverse()
if self._selectNextImage2(path, files):
return
print("load first file")
file = path+"/"+files[0]
try:
self._loadFile(file)
except Exception as e:
print("exception: {}".format(e))
self.ui.label.setText("Drop an image onto the window")
def _selectNextImage2(self, path, files):
startSearching = False
for f in files:
file = path+"/"+f
if file == self.imagePath:
startSearching = True
elif startSearching:
try:
self._loadFile(file)
return True
except Exception as e:
pass # keep looking
# wrap around to the start
for f in files:
file = path+"/"+f
if file == self.imagePath:
return False # got to the end
try:
self._loadFile(file)
return True
except Exception as e:
pass # keep looking
def _getImages(self, path):
#print(f"_getImages {path}")
allFiles = os.listdir(path)
#print(f"allFiles {allFiles}")
allFiles.sort()
files = []
for f in allFiles:
# skip hidden (dot) files
if f[0] == ".": continue
for fmt in QImageReader.supportedImageFormats():
ext = "."+bytes(fmt).decode()
if f.endswith(ext):
files.append(f)
break
return files
def _getPaths(self, imagePath = None):
        if imagePath is None:
imagePath = self.imagePath
backupPath = self.ui.originals.path
wallpaperPath = self.ui.wallpaper.path
if not backupPath or not wallpaperPath:
print("Both the wallpaper and originals paths must be set!")
return None, None
fileName = os.path.basename(imagePath)
backupPath += "/"+fileName
if not os.path.isfile(backupPath):
#print("trying alternative backupPath values...")
for fmt in QImageReader.supportedImageFormats():
altPath = forceExt(backupPath, str(fmt))
#print("altPath "+altPath)
if os.path.isfile(altPath):
backupPath = altPath
break
wallpaperPath += "/"+fileName
return backupPath, wallpaperPath
def _useCroppedImage(self):
backupPath, wallpaperPath = self._getPaths()
if not backupPath or not wallpaperPath:
return
# If original doesn't exist, create it
if not os.path.isfile(backupPath):
shutil.copy(self.imagePath, backupPath)
# Save cropped image
origWallpaperPath = wallpaperPath
if os.path.isfile(wallpaperPath):
os.remove(wallpaperPath)
wallpaperPath = forceJpeg(wallpaperPath)
self.ui.label.saveImage(wallpaperPath)
# If the wallpaper image is open, reload it
# If another path was opened, do nothing
if wallpaperPath == self.imagePath or \
origWallpaperPath == self.imagePath:
self._loadFile(self.imagePath, force=True)
def _useOriginalImage(self):
backupPath, wallpaperPath = self._getPaths()
if not backupPath or not wallpaperPath:
return
# If original doesn't exist, create it
if not os.path.isfile(backupPath):
if wallpaperPath == self.imagePath:
shutil.move(self.imagePath, backupPath)
else:
shutil.copy(self.imagePath, backupPath)
# Save uncropped image
origWallpaperPath = wallpaperPath
if backupPath.endswith(".jpg"):
shutil.copy(backupPath, wallpaperPath)
else:
if os.path.isfile(wallpaperPath):
os.remove(wallpaperPath)
wallpaperPath = forceJpeg(wallpaperPath)
QImage(backupPath).save(wallpaperPath)
# If the wallpaper image is open, reload it
# If another path was opened, do nothing
if wallpaperPath == self.imagePath or \
origWallpaperPath == self.imagePath:
self._loadFile(self.imagePath, force=True)
def _addPadding(self, amount):
self.ui.label.addPadding(amount)
def _togglePreview(self):
self.ui.label.togglePreview()
def _toggleOriginal(self):
backupPath, wallpaperPath = self._getPaths()
if os.path.isfile(backupPath):
if self.ui.label.toggleOriginal(backupPath):
self.setWindowTitle(backupPath)
else:
title = self.imagePath
if self.imagePath != backupPath and \
os.path.isfile(backupPath) and \
not filecmp.cmp(self.imagePath, backupPath):
title += "*"
self.setWindowTitle(title)
def _moveFrame(self, x, y):
self.ui.label.moveFrame(QPoint(x, y))
def _toggleUnusedOriginals(self):
if self.viewMode == VIEW_UNUSED_ORIGINALS:
self.viewMode = VIEW_ALL
self.ui.mode.setText("")
else:
self.viewMode = VIEW_UNUSED_ORIGINALS
self.ui.mode.setText("unused originals")
def _toggleCroppedImages(self):
if self.viewMode == VIEW_CROPPED:
self.viewMode = VIEW_UNCROPPED
self.ui.mode.setText("uncropped")
elif self.viewMode == VIEW_UNCROPPED:
self.viewMode = VIEW_ALL
self.ui.mode.setText("")
else:
self.viewMode = VIEW_CROPPED
self.ui.mode.setText("cropped")
def _removeImage(self):
backupPath, wallpaperPath = self._getPaths()
if not backupPath or not wallpaperPath:
return
# If original doesn't exist, create it
if not os.path.isfile(backupPath):
shutil.move(self.imagePath, backupPath)
# only remove the wallpaper (not an out-of-wallpaper image)
if self.imagePath == wallpaperPath:
os.remove(self.imagePath)
def _toggleHelp(self, visible):
if visible:
self.ui.help.show()
else:
self.ui.help.hide()
def _toggleBackground(self):
bg = Qt.black
if self.ui.label.paddingBackground == Qt.black:
bg = Qt.white
self.ui.label.paddingBackground = bg
self.ui.label._setPaddedFromImage()
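# Illustrative launcher, guarded so that importing this module stays side-effect free.
# Assumption: QApplication comes from the Qt binding's QtWidgets module (PySide6/PySide2);
# the repository presumably ships its own entry script that also sets QSettings defaults.
if __name__ == "__main__":
    try:
        from PySide6.QtWidgets import QApplication
    except Exception:
        from PySide2.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = ImageWindow()
    window.show()
    sys.exit(app.exec_() if hasattr(app, "exec_") else app.exec())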
``` |
{
"source": "a1346054/tldr.py",
"score": 2
} |
#### File: tldr.py/tests/test_list.py
```python
from __future__ import absolute_import
from basic import BasicTestCase
class Testlist(BasicTestCase):
def test_list_command(self):
result = self.call_list_command('')
assert result.output == 'du\ntcpflow\ntldr\n'
def test_list_command_with_platform(self):
result = self.call_list_command('osx')
assert result.output == 'airport\ndu\ntldr\n'
```
#### File: tldr.py/tests/test_locate.py
```python
from __future__ import absolute_import
from os import path
from basic import BasicTestCase
class TestLocate(BasicTestCase):
def test_common_command(self):
assert (self.call_locate_command('tldr', platform='').output.strip() ==
path.join(self.repo_dir, 'pages', 'common', 'tldr.md'))
``` |
{
"source": "a13labs/dataplane-python",
"score": 3
} |
#### File: dataplane-python/src/restapi_python.py
```python
import logging
import requests
from types import SimpleNamespace
import json
from simplejson.errors import JSONDecodeError
__version__ = "0.0.1"
logger = logging.getLogger(__name__)
# Default timeout
DEFAULT_TIMEOUT = 60
class RestAPIException(Exception):
"""Superclass for exceptions thrown by this module"""
def __init__(self, response: requests.Response):
self.__dict__ = response.json()
class BadRequestException(RestAPIException):
"""
A REST method returns 400 (BAD REQUEST)
"""
class UnauthorizedException(RestAPIException):
"""
A REST method returns 401 (UNAUTHORIZED)
"""
class ForbiddenException(RestAPIException):
"""
A REST method returns 403 (FORBIDDEN)
"""
class NotFoundException(RestAPIException):
"""
A REST method returns 404 (NOT FOUND)
"""
class MethodNotAllowedException(RestAPIException):
"""
A REST method returns 405 (METHOD NOT ALLOWED)
"""
class ServerErrorException(RestAPIException):
"""
A REST method returns 500 (INTERNAL SERVER ERROR)
"""
class ServiceUnavailableException(RestAPIException):
"""
The server is currently unable to handle the request
"""
class RestAPI(object):
"""
Represents a RestAPI REST server
    :param string url: protocol://hostname:port of the server.
:param string username: Username used to authenticate against the server
:param string password: Password used to authenticate against the server
:param bool verify: Whether to verify certificates on SSL connections.
    :param float timeout: The timeout value to use, in seconds. Default is 60.
"""
def __init__(
self,
url: str,
username: str,
password: str,
verify: bool = True,
timeout: int = DEFAULT_TIMEOUT,
):
self.session = requests.Session()
self.session.verify = verify
self.session.auth = (username, password)
user_agent = "dataplane-python {} ({})".format(
__version__, self.session.headers["User-Agent"]
)
self.session.headers.update(
{
"Accept": "application/json",
"User-Agent": user_agent,
"X-Requested-By": user_agent,
}
)
self.verify = verify
self.timeout = timeout
self.base_url = url
def __repr__(self):
return "<RestAPI API url='{}' username='{}'>".format(
self.base_url, self.session.auth[0]
)
def get(self, url, **kwargs):
"""
Does a GET request to the specified URL.
Returns the decoded JSON.
"""
response = self.session.get(
url, timeout=self.timeout, params=kwargs)
return self._handle_response(response)
def post(self, url, prefer_async=False, body=None, **kwargs):
"""
Does a POST request to the specified URL.
Returns the decoded JSON.
"""
headers = {"Prefer": "respond-async"} if prefer_async else None
if body is not None:
if headers is None:
headers = {"Content-Type": "application/json"}
else:
headers.update({"Content-Type": "application/json"})
response = self.session.post(
url, headers=headers, timeout=self.timeout, json=body, params=kwargs
)
return self._handle_response(response)
def put(self, url, prefer_async=False, body=None, **kwargs):
"""
Does a PUT request to the specified URL.
Returns the decoded JSON.
"""
headers = {"Prefer": "respond-async"} if prefer_async else None
if body is not None:
if headers is None:
headers = {"Content-Type": "application/json"}
else:
headers.update({"Content-Type": "application/json"})
response = self.session.put(
            url, headers=headers, timeout=self.timeout, json=body, params=kwargs  # json= matches the JSON Content-Type header, mirroring post()
)
return self._handle_response(response)
def delete(self, url, prefer_async=False, **kwargs):
"""
Does a DELETE request to the specified URL.
Returns the decoded JSON.
"""
headers = {"Prefer": "respond-async"} if prefer_async else None
response = self.session.delete(
url, headers=headers, timeout=self.timeout, params=kwargs
)
return self._handle_response(response)
def _handle_response(self, response):
logger.debug(
"Sent %s request to %s, with headers:\n%s\n\nand body:\n%s",
response.request.method,
response.request.url,
"\n".join(
["{0}: {1}".format(k, v)
for k, v in response.request.headers.items()]
),
response.request.body,
)
logger.debug(
"Recieved response:\nHTTP %s\n%s\n\n%s",
response.status_code,
"\n".join(["{0}: {1}".format(k, v)
for k, v in response.headers.items()]),
response.content.decode(),
)
if not response.ok:
self._handle_error(response)
try:
body = response.json()
if isinstance(body, dict):
return RestAPIResponse(body)
elif isinstance(body, list):
result = []
for item in body:
result.append(RestAPIResponse(item))
return result
except JSONDecodeError:
pass
return None
@staticmethod
def _handle_error(response):
exception_type = RestAPIException
if response.status_code == 400:
exception_type = BadRequestException
elif response.status_code == 401:
            exception_type = UnauthorizedException
elif response.status_code == 403:
exception_type = ForbiddenException
elif response.status_code == 404:
exception_type = NotFoundException
elif response.status_code == 405:
exception_type = MethodNotAllowedException
elif response.status_code == 500:
exception_type = ServerErrorException
elif response.status_code == 503:
exception_type = ServiceUnavailableException
raise exception_type(response)
def __call__(self, **kwargs):
return self.get(self.base_url, **kwargs)
def __getattr__(self, name):
return RestAPIEndpoint(name=name, path=name, dataplane=self)
class RestAPIResponse(object):
"""
Represents a RestAPI response
    :param dict body: decoded JSON body of the server response
"""
def __init__(self, body: dict):
self.__dict__ = RestAPIResponse._dict_to_sn(body).__dict__
@staticmethod
def _dict_to_sn(d: dict):
result = SimpleNamespace()
for k, v in d.items():
if isinstance(v, dict):
setattr(result, k, RestAPIResponse._dict_to_sn(v))
continue
setattr(result, k, v)
return result
def __repr__(self):
return "<RestAPI Response dict='{}'>".format(
self.__dict__
)
class RestAPIEndpoint(object):
"""
    Represents a RestAPI Endpoint
:param string name: name of the endpoint.
:param string path: path
:param RestAPI dataplane: dataplane Server object
"""
def __init__(self, name: str, path: str, dataplane: RestAPI):
self._name = name
self._path = path
self._api = dataplane
def __getattr__(self, name):
return RestAPIEndpoint(name, "{}/{}".format(self._path, name), self._api)
def __call__(self, path=None, api_version: str = "v1", method: str = "GET", **kwargs):
try:
attr = object.__getattribute__(self, method.lower())
return attr(path=path, api_version=api_version, **kwargs)
except AttributeError:
return None
def get(self, api_version: str = "v1", path=None, **kwargs):
"""
Does a GET request to the specified URL.
Returns the decoded JSON.
"""
url = "{}/{}/{}".format(self._api.base_url, api_version, self._path)
if path is not None:
url = "{}/{}".format(url, path)
return self._api.get(url=url, **kwargs)
def post(self, api_version: str = "v1", path=None, **kwargs):
"""
Does a POST request to the specified URL.
Returns the decoded JSON.
"""
url = "{}/{}/{}".format(self._api.base_url, api_version, self._path)
if path is not None:
url = "{}/{}".format(url, path)
return self._api.post(url=url, **kwargs)
def put(self, api_version: str = "v1", path=None, **kwargs):
"""
Does a PUT request to the specified URL.
Returns the decoded JSON.
"""
url = "{}/{}/{}".format(self._api.base_url, api_version, self._path)
if path is not None:
url = "{}/{}".format(url, path)
return self._api.put(url=url, **kwargs)
def delete(self, api_version: str = "v1", path=None, **kwargs):
"""
Does a DELETE request to the specified URL.
Returns the decoded JSON.
"""
url = "{}/{}/{}".format(self._api.base_url, api_version, self._path)
if path is not None:
url = "{}/{}".format(url, path)
return self._api.delete(url=url, **kwargs)
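# Minimal usage sketch (the URL, credentials and the "nodes" endpoint below are
# hypothetical, not part of this module): attribute access on RestAPI builds a
# RestAPIEndpoint, and calling its verbs requests "{base_url}/{api_version}/{path}".
if __name__ == "__main__":
    api = RestAPI("https://dataplane.example.com/api", "admin", "secret", verify=False)
    all_nodes = api.nodes.get()                  # GET  .../v1/nodes
    one_node = api.nodes.get(path="node-1")      # GET  .../v1/nodes/node-1
    api.nodes.post(body={"name": "node-2"})      # POST .../v1/nodes with a JSON body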
``` |
{
"source": "a13ph/text-to-notion-bot",
"score": 3
} |
#### File: a13ph/text-to-notion-bot/db.py
```python
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
engine = create_engine(os.environ['DATABASE_URL_NOTION'])
DBSession = sessionmaker(bind=engine)
session = DBSession()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(64))
notion_api_key = Column(String(250))
page_address = Column(String(250))
page_title = Column(String(250))
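# note: engine.dialect.has_table(engine, ...) follows SQLAlchemy 1.3-era usage; on 1.4+
# the equivalent check is sqlalchemy.inspect(engine).has_table('users')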
if not engine.dialect.has_table(engine, 'users'):
Base.metadata.create_all(engine)
def create_new_user(session, username):
if not session.query(User).filter(User.username == username).first():
user = User(username=username)
session.add(user)
session.commit()
def check_if_user_exists(session, username):
if not session.query(User).filter(User.username == username).first():
return False
return True
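# Illustrative usage (a sketch; the bot's handlers presumably call these helpers):
if __name__ == '__main__':
    create_new_user(session, 'example_user')  # hypothetical Telegram username
    print(check_if_user_exists(session, 'example_user'))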
``` |
{
"source": "a13xg0/parametirc",
"score": 3
} |
#### File: a13xg0/parametirc/plan.py
```python
from __future__ import division
import sys
import copy
from task import Task
class Plan:
""" Класс-контейнер для текущего плана"""
def __init__(self, source=None):
"""
        Plan class constructor.
        If the source parameter is given and is of type Plan, its values are copied.
        :param source: source plan to copy values from
        :type source: Plan
"""
self.B = []
""" Индексы базисных переменных """
self.A = [[]]
""" Матрица коэффициентов уравнения """
self.alpha = []
""" Оценка каждого столбца. комплесное число"""
self.ai0 = []
""" Свободные коэффициенты """
self.sigma = []
""" Оценки базисных переменных """
self.task = None
""" Исходная задача """
self.z = None
""" Значение целевой функции в текущем плане """
self.last_error = ""
""" Текст последней ошибки """
if source is not None:
            if not isinstance(source, Plan):
                self.last_error = "Source is not of type Plan"
return
self.B = copy.copy(source.B)
self.A = copy.deepcopy(source.A)
self.alpha = copy.copy(source.alpha)
self.ai0 = copy.copy(source.ai0)
self.task = source.task
for i in range(self.task.m, 0, -1):
self.sigma.append(None)
def set_task(self, source_task):
"""
        Set the plan's task and initialize the plan.
        :param source_task: source task used to set the parameters
:type source_task: Task
"""
if not isinstance(source_task, Task):
            self.last_error = "The task is not of type Task"
return False
self.task = source_task
self.A = copy.deepcopy(self.task.A)
for i in range(self.task.m, 0, -1):
k = self.task.n - i
            # Store the last variables of vector X as the initial basis variables
self.B.append(k)
# Заполняем свободные коэффициенты плана
self.ai0.append(self.task.X[k])
self.sigma.append(None)
for i in range(0, self.task.n):
self.alpha.append(None)
self.last_error = ""
return True
def calc_alpha_assessment(self):
""" Пересчитать оценки переменных """
for j in range(0, self.task.n):
self.alpha[j] = 0
for i in range(0, self.task.m):
self.alpha[j] += self.task.C[self.B[i]] * self.A[i][j]
self.alpha[j] -= self.task.C[j]
def calc_sigma(self, col=0):
"""
        Calculate the row estimates.
        :param col: column used for evaluating the coefficient
:type col: Integer
"""
for i in range(0, self.task.m):
if self.A[i][col] > 0:
self.sigma[i] = self.ai0[i] / self.A[i][col]
else:
self.sigma[i] = None
def calc_z(self):
""" Посчитать значение целевой функции в плане """
self.z = 0
for i in range(0, self.task.m):
self.z += self.task.C[self.B[i]] * self.ai0[i]
return self.z
def is_optimal(self, L=None):
""" Проверка плана на оптимальность """
if L is None:
for i in range(0, self.task.n):
if round(self.alpha[i].imag, 10) != 0:
                    # if a parametric term is present, it is not known whether the plan is optimal
return False
if round(self.alpha[i].real, 10) < 0:
return False
else:
for i in range(0, self.task.n):
val = round(self.alpha[i].real + self.alpha[i].imag * L, 10)
if val < 0:
return False
return True
def is_only_l(self):
""" В оценках плана нет отрицательнцх целх значений, остались только значения L """
for i in range(0, self.task.n):
if round(self.alpha[i].real) < 0 and round(self.alpha[i].imag) == 0:
return False
for i in range(0, self.task.n):
if round(self.alpha[i].imag) != 0:
return True
return False
def is_solvable(self, L=None):
""" Оценить разрешимость плана, в случае неоптимальности плана, проверка на возможность перехода """
if L is None:
for j in range(0, self.task.n):
if self.alpha[j].real < 0:
                    # if the estimate is negative, check the columns
                    is_good = False  # assume the worst case: no positive elements in the column
for i in range(0, self.task.m):
if self.A[i][j] > 0:
is_good = True
break
if not is_good:
                        # a column without positive coefficients was found
return False
else:
for j in range(0, self.task.n):
                if self.alpha[j].real + self.alpha[j].imag * L < 0:
                    # if the estimate is negative, check the columns
                    is_good = False  # assume the worst case: no positive elements in the column
for i in range(0, self.task.m):
if self.A[i][j] > 0:
is_good = True
break
if not is_good:
                        # a column without positive coefficients was found
return False
return True
    #############################
    #                           #
    # OUTPUT FORMATTING METHODS #
    #                           #
    #############################
def sign_M(self, digit):
if digit == 0:
return "0"
elif digit == 1:
return "+L"
elif digit == -1:
return "-L"
else:
return '{:>+7.2f}L'.format(digit)
def format_M(self, digit):
out = ""
if type(digit) is complex:
if digit.real == 0:
out = '{:^15s}'.format(self.sign_M(digit.imag))
elif round(digit.imag, 5) == 0:
out = '{:^15.2f}'.format(digit.real)
else:
out = '{:>4.2f}{:>4s}'.format(digit.real, self.sign_M(digit.imag))
elif type(digit) is str:
out = '{:^15s}'.format(digit)
elif digit is None:
out = '{:^15s}'.format('-')
else:
out = '{:^15.2f}'.format(digit)
return out
def format_variable_name(self, i):
out = "x[" + str(i + 1) + "]"
return out
def print_hline(self, sym):
print
sys.stdout.write('+')
for i in range(0, (self.task.n + 4)*16 - 1):
sys.stdout.write(sym)
sys.stdout.write('+')
print
def out_column_str(self, s):
sys.stdout.write('{:^15s}|'.format(s))
def print_plan(self):
""" Вывод плана на экран """
        # print the objective coefficients for the simplex table
        # skip two columns
sys.stdout.write('{:>33}'.format('|'))
for i in range(0, self.task.n):
self.out_column_str(self.format_M(self.task.C[i]))
self.print_hline('-')
        # table header
sys.stdout.write('|')
self.out_column_str('C[i]')
self.out_column_str('B')
for i in range(0, self.task.n):
self.out_column_str(self.format_variable_name(i))
self.out_column_str('ai[0]')
self.out_column_str('sigma')
self.print_hline('=')
first = True
for i in range(0, self.task.m):
if first:
first = False
else:
print
sys.stdout.write('|')
self.out_column_str(self.format_M(self.task.C[self.B[i]]))
self.out_column_str(self.format_variable_name(self.B[i]))
for j in range(0, self.task.n):
self.out_column_str(self.format_M(self.A[i][j]))
self.out_column_str(self.format_M(self.ai0[i]))
self.out_column_str(self.format_M(self.sigma[i]))
self.print_hline('-')
sys.stdout.write('|')
self.out_column_str("")
self.out_column_str("alpha_j")
for i in range(0, self.task.n):
self.out_column_str(self.format_M(self.alpha[i]))
self.out_column_str(self.format_M(self.z))
self.out_column_str("Z")
self.print_hline('=')
print
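# Illustrative use (a sketch only; the Task constructor is defined elsewhere in this
# repository, so the concrete setup below is assumed rather than taken from this file):
#   task = Task(...)  # hypothetical initialisation
#   plan = Plan()
#   plan.set_task(task)
#   plan.calc_alpha_assessment()
#   plan.calc_z()
#   plan.print_plan()
#   print(plan.is_optimal())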
``` |
{
"source": "a13xg0/togu_crypt_lab1",
"score": 3
} |
#### File: a13xg0/togu_crypt_lab1/i_cipher_provider.py
```python
import abc
class ICipherProvider(abc.ABC):
    """ Cipher classes interface """
@abc.abstractmethod
def encrypt(self, block):
""" Encrypts block, returns encrypted data
@param: block string The block with data to encrypt
"""
raise NotImplementedError
    @abc.abstractmethod
    def decrypt(self, cipher):
""" Decrypts block, returns decrypted data
@param: block string The block with cipher text
"""
raise NotImplementedError
```
#### File: a13xg0/togu_crypt_lab1/table_permutation_cipher.py
```python
from i_cipher_provider import ICipherProvider
from table_permutation_key_provider import TablePermutationKeyProvider
class TablePermutationCipher(ICipherProvider):
""" Table Permutation cipher provider"""
def __init__(self, key_provider):
""" Initializes class with specific key
@param key_provider IKeyProvider key for operation
"""
self._key_provider = key_provider
def encrypt(self, block):
""" Encrypt given block with table permutation cipher
@param block string data to encrypt
"""
key_1 = self._key_provider.get_key(TablePermutationKeyProvider.KEY_1)
key_2 = self._key_provider.get_key(TablePermutationKeyProvider.KEY_2)
block_length = len(key_2)
rows = len(key_1)
tbl = [None] * rows
# fill table
for i in range(rows):
tbl[key_1[i] - 1] = block[i * block_length: (i + 1) * block_length]
res = ""
# write cypher
for i in range(block_length):
for j in range(rows):
res += tbl[j][key_2[i] - 1]
return res
def decrypt(self, cipher):
""" Decrypt given cipher with table permutation cipher
@param cipher string data to decrypt
"""
key_1 = self._key_provider.get_key(TablePermutationKeyProvider.KEY_1)
key_2 = self._key_provider.get_key(TablePermutationKeyProvider.KEY_2)
block_length = len(key_2)
rows = len(key_1)
tbl = [[None for i in range(block_length)] for j in range(rows)]
# fill table
for j in range(block_length):
for i in range(rows):
tbl[i][key_2[j] - 1] = cipher[j * rows + i]
res = ""
# read table
for i in range(rows):
res += "".join(tbl[key_1[i] -1])
return res
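# Minimal usage sketch. TablePermutationKeyProvider normally supplies the keys; a tiny
# stand-in provider (with the same keys as the unit tests) is used here for illustration.
if __name__ == "__main__":
    class _DemoKeyProvider:
        def get_key(self, key_property):
            return {TablePermutationKeyProvider.KEY_1: [5, 3, 1, 2, 4, 6],
                    TablePermutationKeyProvider.KEY_2: [3, 4, 1, 2]}[key_property]

    cipher = TablePermutationCipher(_DemoKeyProvider())
    encrypted = cipher.encrypt("ABCDEFGHIJKLMNOPQRSTUVWX")  # 6 rows x 4 columns = 24 chars
    print(encrypted)
    print(cipher.decrypt(encrypted))  # restores the original 24-character block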
```
#### File: togu_crypt_lab1/tests/test_table_permutation_cipher.py
```python
import unittest
from mock import patch, mock_open
from table_permutation_cipher import TablePermutationCipher
from table_permutation_key_provider import TablePermutationKeyProvider
from i_key_provider import IKeyProvider
class FakeKeyProvider(IKeyProvider):
def __init__(self, key1, key2):
self._key1 = key1
self._key2 = key2
def get_key(self, key_property):
if key_property == TablePermutationKeyProvider.KEY_1:
return self._key1
if key_property == TablePermutationKeyProvider.KEY_2:
return self._key2
class PermutationCipherTestCase(unittest.TestCase):
def getKey(self):
return FakeKeyProvider([5, 3, 1, 2, 4, 6], [3, 4, 1, 2])
def test_init_creation_initializationCorrect(self):
cipher = TablePermutationCipher("some provider")
self.assertEqual(cipher._key_provider, "some provider", "invalid provider assignment")
def test_encrypt_FIO_TruncatedCipher(self):
cipher = TablePermutationCipher(self.getKey())
res = cipher.encrypt(u"ГОРБАЧ_АЛЕКСАНДР_ВИТАЛЬЕВИЧ")
self.assertEqual(u"КД_ИРЬСРАТБЕЛАА_ГАЕНЧВОЛ", res, "error in cypher algorithm")
def test_decrypt_Cipher_RightResponse(self):
cipher = TablePermutationCipher(self.getKey())
res = cipher.decrypt(u"КД_ИРЬСРАТБЕЛАА_ГАЕНЧВОЛ")
self.assertEqual(u"ГОРБАЧ_АЛЕКСАНДР_ВИТАЛЬЕ", res, "error in cypher algorithm")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "a13xg0/uttlv",
"score": 3
} |
#### File: uttlv/uttlv/encoder.py
```python
from __future__ import annotations
from binascii import hexlify
class DefaultEncoder(object):
def default(self, obj):
try:
return obj.to_byte_array()
        except Exception:
raise TypeError('Invalid type')
def to_string(self, obj, offset=0, use_names=False):
try:
return obj.tree(offset + obj.indent, use_names)
except:
pass
return str(obj)
def parse(self, obj, cls=None):
try:
cls.parse_array(obj)
return cls
except:
pass
return obj
class IntEncoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, int):
return obj.to_bytes(4, byteorder='big')
return super().default(obj)
def parse(self, obj, cls=None):
return int.from_bytes(obj, byteorder='big')
class AsciiEncoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, str):
return obj.encode('ascii')
return super().default(obj)
def parse(self, obj, cls=None):
return obj.decode('ascii')
class BytesEncoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return obj
return super().default(obj)
def to_string(self, obj, offset=0, use_names=False):
return str(hexlify(obj), 'ascii')
def parse(self, obj, cls=None):
return obj
class Utf8Encoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, str):
return obj.encode('utf8')
return super().default(obj)
def parse(self, obj, cls=None):
return obj.decode('utf8')
class Utf16Encoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, str):
return obj.encode('utf16')
return super().default(obj)
def parse(self, obj, cls=None):
return obj.decode('utf16')
class Utf32Encoder(DefaultEncoder):
def default(self, obj):
if isinstance(obj, str):
return obj.encode('utf32')
return super().default(obj)
def parse(self, obj, cls=None):
return obj.decode('utf32')
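# Quick self-check sketch of the encoders' symmetry (the sample values are illustrative):
if __name__ == '__main__':
    assert IntEncoder().default(5) == b'\x00\x00\x00\x05'
    assert IntEncoder().parse(b'\x00\x00\x00\x05') == 5
    assert AsciiEncoder().parse(AsciiEncoder().default('tlv')) == 'tlv'
    assert Utf8Encoder().parse(Utf8Encoder().default('héllo')) == 'héllo'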
``` |
{
"source": "a13xk13m/covidcg",
"score": 3
} |
#### File: cg_server/download/metadata.py
```python
import pandas as pd
import psycopg2
from flask import make_response
from psycopg2 import sql
from cg_server.config import config
from cg_server.constants import constants
from cg_server.query.selection import create_sequence_temp_table
def download_metadata(conn, req):
with conn.cursor() as cur:
temp_table_name = create_sequence_temp_table(cur, req)
# Fields that the user wants
selected_fields = req.get("selected_fields", [])
snv_format = req.get("snv_format", constants["SNV_FORMAT"]["POS_REF_ALT"])
sequence_cols = [
"Accession ID",
"collection_date",
"submission_date",
]
sequence_cols_expr = [
sql.SQL("q.{}").format(sql.Identifier(col)) for col in sequence_cols
]
metadata_joins = []
# Location columns
for col in list(constants["GEO_LEVELS"].values()):
if col not in selected_fields:
continue
sequence_cols.append(col)
sequence_cols_expr.append(sql.SQL("loc.{}").format(sql.Identifier(col)))
for grouping in config["group_cols"].keys():
if grouping not in selected_fields:
continue
sequence_cols.append(grouping)
sequence_cols_expr.append(sql.SQL("q.{}").format(sql.Identifier(grouping)))
for field in config["metadata_cols"].keys():
if field not in selected_fields:
continue
sequence_cols.append(field)
sequence_cols_expr.append(
sql.SQL(
"""
{metadata_table_name}."value" as {field}
"""
).format(
metadata_table_name=sql.Identifier("metadata_" + field),
field=sql.Identifier(field),
)
)
metadata_joins.append(
sql.SQL(
"""
INNER JOIN {metadata_table_name} {metadata_table_name}
ON q.{field} = {metadata_table_name}."id"
"""
).format(
metadata_table_name=sql.Identifier("metadata_" + field),
field=sql.Identifier(field),
)
)
for snp_field in ["dna", "gene_aa", "protein_aa"]:
if snp_field not in selected_fields:
continue
sequence_cols.append(snp_field + "_snp")
sequence_cols_expr.append(
sql.SQL("qq.{}").format(sql.Identifier(snp_field + "_snp"))
)
# CTEs and evaluating the metadata joins separately
# from the SNV joins speeds this up by a lot
snv_agg_field = "snp_str"
if snv_format == constants["SNV_FORMAT"]["POS_REF_ALT"]:
snv_agg_field = "snp_str"
elif snv_format == constants["SNV_FORMAT"]["REF_POS_ALT"]:
snv_agg_field = "snv_name"
query = sql.SQL(
"""
WITH dss AS (
SELECT
q."id" as "sequence_id",
array_to_string(array_agg(ds.{snv_agg_field}), ';') as "snp"
FROM {temp_table_name} q
INNER JOIN "sequence_dna_snp" sds ON q."id" = sds."sequence_id"
INNER JOIN "dna_snp" ds ON sds."snp_id" = ds."id"
GROUP BY q."id"
),
gass AS (
SELECT
q."id" as "sequence_id",
array_to_string(array_agg(gas.{snv_agg_field}), ';') as "snp"
FROM {temp_table_name} q
INNER JOIN "sequence_gene_aa_snp" sgas ON q."id" = sgas."sequence_id"
INNER JOIN "gene_aa_snp" gas ON sgas."snp_id" = gas."id"
GROUP BY q."id"
),
pass AS (
SELECT
q."id" as "sequence_id",
array_to_string(array_agg(pas.{snv_agg_field}), ';') as "snp"
FROM {temp_table_name} q
INNER JOIN "sequence_protein_aa_snp" spas ON q."id" = spas."sequence_id"
INNER JOIN "protein_aa_snp" pas ON spas."snp_id" = pas."id"
GROUP BY q."id"
),
qq AS (
SELECT
q."id",
dss."snp" as "dna_snp",
gass."snp" as "gene_aa_snp",
pass."snp" as "protein_aa_snp"
FROM {temp_table_name} q
INNER JOIN dss ON q."id" = dss."sequence_id"
INNER JOIN gass ON q."id" = gass."sequence_id"
INNER JOIN pass ON q."id" = pass."sequence_id"
)
SELECT
{sequence_cols_expr}
FROM {temp_table_name} q
INNER JOIN "location" loc ON q."location_id" = loc."id"
{metadata_joins}
JOIN qq ON qq."id" = q."id"
"""
).format(
snv_agg_field=sql.Identifier(snv_agg_field),
temp_table_name=sql.Identifier(temp_table_name),
sequence_cols_expr=sql.SQL(",").join(sequence_cols_expr),
metadata_joins=sql.SQL("\n").join(metadata_joins),
)
# print(query)
cur.execute(query)
res_df = pd.DataFrame.from_records(cur.fetchall(), columns=sequence_cols,)
return make_response(res_df.to_csv(index=False), 200, {"Content-Type": "text/csv"})
```
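The query above is assembled with `psycopg2.sql` so table and column names are composed as quoted identifiers rather than interpolated into the SQL string. A minimal sketch of that pattern, with a hypothetical metadata field name:
```python
from psycopg2 import sql
field = "lineage"  # hypothetical metadata field name
query = sql.SQL('SELECT {col} FROM {table} WHERE "id" = %s').format(
    col=sql.Identifier("value"),
    table=sql.Identifier("metadata_" + field),
)
# query.as_string(conn) would render both identifiers double-quoted, and
# cur.execute(query, (some_id,)) passes the value separately as a parameter.
```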
#### File: server/cg_server/test_connpool.py
```python
from functools import wraps
def raiseDatabaseError(kwargs):
def decorator_test_connpool(func):
@wraps(func)
def wrapper_test_connpool(*args, **kwargs):
return func(*args, **kwargs)
return wrapper_test_connpool
return decorator_test_connpool
```
#### File: phylotree/scripts/representative_seqs.py
```python
import pandas as pd
import numpy as np
import json
from datetime import date
from tempfile import NamedTemporaryFile
def date2float(isodate):
""" Convert ISO date string to float (years) """
year, month, day = map(int, isodate.split("-"))
dt = date(year, month, day)
origin = date(dt.year, 1, 1)
td = (dt - origin).days
return dt.year + td / 365.25
def get_representative_seqs(
case_data_path,
metadata_map_path,
location_map_path,
ref_seq_path,
fasta_out_path,
datefile_out_path,
table_out_path,
):
# Load DNA mutation ID map
with open(metadata_map_path, "r") as fp:
metadata_map = json.loads(fp.read())
dna_snp = metadata_map["dna_snp"]
id_to_dna_snp = {v: k for k, v in dna_snp.items()}
# Load location map
location_map = pd.read_json(location_map_path)
# Load data and select a representative sequence for each lineage
# by choosing the last seen sequence from each lineage
df = pd.read_json(case_data_path)
df["collection_date"] = pd.to_datetime(df["collection_date"])
df = df.join(location_map, on="location_id")
# https://github.com/cov-lineages/pango-designation
# See: DOI 10.1038/s41564-020-0770-5, 10.1093/ve/veab064
pango_lineages = pd.read_csv(
"https://github.com/cov-lineages/pango-designation/raw/master/lineages.csv"
)
reps = (
df.loc[
(df["collection_date"] >= pd.to_datetime("2019-12-15"))
& (df["collection_date"] <= pd.to_datetime(date.today()))
& (
df["virus_name"]
.str.replace("hCoV-19/", "")
.isin(pango_lineages["taxon"])
)
]
.drop_duplicates("lineage", keep="first")[
["Accession ID", "collection_date", "lineage", "dna_snp_str"]
]
.set_index("Accession ID")
# Join first and last collection dates
.join(
(
df.loc[
(df["collection_date"] > pd.to_datetime("2019-12-15"))
& (df["collection_date"] < pd.to_datetime(date.today()))
]
.groupby("lineage")
.agg(
date_min=("collection_date", np.min),
date_max=("collection_date", np.max),
)
),
on="lineage",
)
# Join region with the most counts of this lineage
.join(
(
df.groupby(["lineage", "region"], as_index=False)[["Accession ID"]]
.agg("count")
.groupby("lineage")
.apply(lambda x: x["region"].values[np.argmax(x["Accession ID"])])
.rename("region_most_common")
),
on="lineage",
)
)
# Load reference sequence
with open(ref_seq_path, "r") as fp:
ref = json.loads(fp.read())
ref_seq = ref["ref_seq"]
# For each representative sequence, use its DNA mutations to reconstruct its
# "MSA" genome from the reference genome
# Ignore insertions, since this would require a true MSA
reps["sequence"] = ref_seq
for accession_id, row in reps.iterrows():
dna_snps = row["dna_snp_str"]
seq = list(ref_seq)
for snp in dna_snps:
snp = id_to_dna_snp[snp].split("|")
pos = int(snp[0])
ref = snp[1]
alt = snp[2]
# Skip insertions
if ref == "-" or len(alt) > len(ref):
continue
# Deletions
if alt == "-":
seq[pos - 1 : (pos - 1) + len(ref)] = ["-"] * len(ref)
# Mutations
else:
seq[pos - 1 : (pos - 1) + len(ref)] = list(alt)
reps.loc[accession_id, "sequence"] = "".join(seq)
# Write "MSA" sequences as a FASTA file
with open(fasta_out_path, "w") as fp:
for accession_id, row in reps.iterrows():
fp.write(">{}\n{}\n".format(accession_id, row["sequence"]))
# Write files for treetime
# Extract dates from sequence headers
reps["collection_date"] = reps["collection_date"].astype(str)
with open(datefile_out_path, "w") as fp:
fp.write("name,date\n")
for accession_id, row in reps.iterrows():
# Date format should be fine: https://github.com/neherlab/treetime#metadata-and-date-format
fp.write("{},{}\n".format(accession_id, date2float(row["collection_date"])))
fp.close()
# Write dataframe for future
reps.drop(columns=["dna_snp_str"]).to_csv(table_out_path)
```
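`date2float` produces the decimal-year format expected by TreeTime; restating the helper so the arithmetic can be checked in isolation:
```python
from datetime import date
def date2float(isodate):
    year, month, day = map(int, isodate.split("-"))
    dt = date(year, month, day)
    origin = date(dt.year, 1, 1)
    return dt.year + (dt - origin).days / 365.25
print(date2float("2021-01-01"))  # 2021.0 -- zero days into the year
print(date2float("2021-07-01"))  # ~2021.4956 -- 181 days / 365.25
```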
#### File: workflow_main/scripts/build_location_tree.py
```python
import json
import pandas as pd
from scripts.util import human_format
def build_location_tree(case_data, location_map, emoji_map_file, geo_select_tree_out):
"""Build tree for ReactDropdownTreeSelect
data
Type: Object or Array
Data for rendering the tree select items. The object requires the following structure:
{
label, // required: Checkbox label
value, // required: Checkbox value
children, // optional: Array of child objects
checked, // optional: Initial state of checkbox. if true, checkbox is selected and corresponding pill is rendered.
disabled, // optional: Selectable state of checkbox. if true, the checkbox is disabled and the node is not selectable.
expanded, // optional: If true, the node is expanded (children of children nodes are not expanded by default unless children nodes also have expanded: true).
className, // optional: Additional css class for the node. This is helpful to style the nodes your way
tagClassName, // optional: Css class for the corresponding tag. Use this to add custom style the pill corresponding to the node.
actions, // optional: An array of extra action on the node (such as displaying an info icon or any custom icons/elements)
dataset, // optional: Allows data-* attributes to be set on the node and tag elements
isDefaultValue, // optional: Indicate if a node is a default value. When true, the dropdown will automatically select the node(s) when there is no other selected node. Can be used on more than one node.
... // optional: Any extra properties that you'd like to receive during `onChange` event
}
The action object requires the following structure:
{
className, // required: CSS class for the node. e.g. `fa fa-info`
title, // optional: HTML tooltip text
text, // optional: Any text to be displayed. This is helpful to pass ligatures if you're using ligature fonts
... // optional: Any extra properties that you'd like to receive during `onChange` event
}
An array renders a tree with multiple root level items whereas an object renders a tree with a single root element (e.g. a Select All root node).
Example:
const data = {
label: 'search me',
value: 'searchme',
children: [
{
label: 'search me too',
value: 'searchmetoo',
children: [
{
label: 'No one can get me',
value: 'anonymous',
},
],
},
],
}
"""
df = pd.read_json(case_data).set_index("Accession ID")
with open(location_map, "r") as fp:
location_map_df = pd.DataFrame(json.loads(fp.read()))
# Join location data back to main dataframe
df = df.join(location_map_df, on="location_id")
# Set unspecified locations to None so that they don't get
# caught up in the groupby
df.loc[df["region"] == "-1", "region"] = None
df.loc[df["country"] == "-1", "country"] = None
df.loc[df["division"] == "-1", "division"] = None
df.loc[df["location"] == "-1", "location"] = None
# Count sequences per grouping level
region_counts = dict(df.groupby("region")["location_id"].count())
country_counts = dict(df.groupby(["region", "country"])["location_id"].count())
division_counts = dict(
df.groupby(["region", "country", "division"])["location_id"].count()
)
location_counts = dict(
df.groupby(["region", "country", "division", "location"])["location_id"].count()
)
# Load country -> emoji map
emoji_map = pd.read_excel(emoji_map_file, skiprows=1)
# Expand country aliases, remove whitespace from each alias
emoji_map["aliases"] = (
emoji_map["aliases"].str.split(",").apply(lambda x: [y.strip() for y in x])
)
# Root node
select_tree = {"label": "All", "value": "All", "children": []}
for i, loc in location_map_df.iterrows():
# Add region node
if loc["region"] == "-1":
continue
region_node = [
c for c in select_tree["children"] if c["value"] == loc["region"]
]
if region_node:
region_node = region_node[0]
else:
region_node = {
"label": loc["region"],
"value": loc["region"],
"level": "region",
"location_id": i,
"actions": [
{
"className": "fa fa-info",
"title": str(region_counts[loc["region"]]) + " sequences",
"text": human_format(region_counts[loc["region"]]),
}
],
"children": [],
}
select_tree["children"].append(region_node)
# Add country --> region
if loc["country"] == "-1":
continue
country_node = [
c for c in region_node["children"] if c["value"] == loc["country"]
]
if country_node:
country_node = country_node[0]
else:
# Look for an emoji for this country
country_emoji = ""
emoji_entry = emoji_map.loc[
emoji_map["aliases"].apply(lambda x: loc["country"] in x), :
]
# Fill the country emoji, if it exists
if len(emoji_entry) == 1:
country_emoji = emoji_entry.iat[0, 1] + " "
country_node = {
"label": country_emoji + loc["country"],
"value": loc["country"],
"region": loc["region"],
"level": "country",
"location_id": i,
"actions": [
{
"className": "fa fa-info",
"title": str(country_counts[(loc["region"], loc["country"])])
+ " sequences",
"text": human_format(
country_counts[(loc["region"], loc["country"])]
),
}
],
"children": [],
}
region_node["children"].append(country_node)
# Add division --> country
if loc["division"] == "-1":
continue
division_node = [
c for c in country_node["children"] if c["value"] == loc["division"]
]
if division_node:
division_node = division_node[0]
else:
division_node = {
"label": loc["division"],
"value": loc["division"],
"region": loc["region"],
"country": loc["country"],
"level": "division",
"location_id": i,
"actions": [
{
"className": "fa fa-info",
"title": str(
division_counts[
(loc["region"], loc["country"], loc["division"])
]
)
+ " sequences",
"text": human_format(
division_counts[
(loc["region"], loc["country"], loc["division"])
]
),
}
],
"children": [],
}
country_node["children"].append(division_node)
# Add location --> division
if loc["location"] == "-1":
continue
location_node = [
c for c in division_node["children"] if c["value"] == loc["location"]
]
if location_node:
location_node = location_node[0]
else:
location_node = {
"label": loc["location"],
"value": loc["location"],
"region": loc["region"],
"country": loc["country"],
"division": loc["division"],
"level": "location",
"location_id": i,
"actions": [
{
"className": "fa fa-info",
"title": str(
location_counts[
(
loc["region"],
loc["country"],
loc["division"],
loc["location"],
)
]
)
+ " sequences",
"text": human_format(
location_counts[
(
loc["region"],
loc["country"],
loc["division"],
loc["location"],
)
]
),
}
],
"children": [],
}
division_node["children"].append(location_node)
with open(geo_select_tree_out, "w") as fp:
fp.write(json.dumps(select_tree))
```
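The per-level counts above come from grouping the joined frame by progressively longer location tuples; on a toy frame the pattern looks like this:
```python
import pandas as pd
df = pd.DataFrame({
    "region":      ["Europe", "Europe", "Asia"],
    "country":     ["Italy",  "Italy",  "Japan"],
    "location_id": [1, 2, 3],
})
region_counts = dict(df.groupby("region")["location_id"].count())
country_counts = dict(df.groupby(["region", "country"])["location_id"].count())
print(region_counts)   # {'Asia': 1, 'Europe': 2}
print(country_counts)  # {('Asia', 'Japan'): 1, ('Europe', 'Italy'): 2}
```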
#### File: workflow_main/scripts/get_cdc_vocs.py
```python
import re
import argparse
import json
import requests
from bs4 import BeautifulSoup
def get_cdc_vocs():
url = ('https://www.cdc.gov/coronavirus/2019-ncov/'
'variants/variant-info.html')
level_order = ['VOI', 'VOC']
variant_list = []
lineage_pattern = re.compile('([A-Z]+([.]+\\d+)+([.]+\\d+))')
vocPage = requests.get(url)
soup = BeautifulSoup(vocPage.content, 'html.parser')
# Find all tables (VOI, VOC)
variantTables = soup.find_all('div', class_='step-table m-3')
level_ind = 0
for table in variantTables:
if level_ind >= len(level_order):
# Break if there is a table we are not interested in
break
level = level_order[level_ind]
rowgroup = table.div
rows = rowgroup.find_all('div', class_='mb-0 pt-3')
for row in rows:
variant_row = list(row.stripped_strings)
for col in variant_row:
for match in lineage_pattern.findall(col):
variant = {'name': match[0],
'level': level}
variant_list.append(variant)
level_ind += 1
return variant_list
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", required=True, type=str,
help="Path to output file")
args = parser.parse_args()
variant_list = get_cdc_vocs()
with open(args.output, 'w') as fp:
fp.write(json.dumps(variant_list, indent=2))
if __name__ == "__main__":
main()
```
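`lineage_pattern` only matches Pango-style names with at least two numeric segments, and `findall` returns group tuples whose first element is the full lineage string:
```python
import re
lineage_pattern = re.compile('([A-Z]+([.]+\\d+)+([.]+\\d+))')
print([m[0] for m in lineage_pattern.findall('B.1.1.7 and B.1.617.2 (Delta)')])
# ['B.1.1.7', 'B.1.617.2']
print(lineage_pattern.findall('B.1'))  # [] -- a single numeric segment does not match
```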
#### File: workflow_main/scripts/get_ecdc_vocs.py
```python
import argparse
import json
import requests
from bs4 import BeautifulSoup
def get_ecdc_vocs():
url = 'https://www.ecdc.europa.eu/en/covid-19/variants-concern'
level_order = ['VOC', 'VOI', 'Other']
variant_list = []
vocPage = requests.get(url)
soup = BeautifulSoup(vocPage.content, 'html.parser')
# Find all tables (VOC, VOI, and Other)
variantTables = soup.find_all('tbody')
level_ind = 0
for table in variantTables:
if level_ind >= len(level_order):
# Break if there is a table we are not interested in
break
level = level_order[level_ind]
for row in table.find_all('tr'):
name = list(row.find_all('td')[1].stripped_strings)[0]
variant = {'name': name, 'level': level}
variant_list.append(variant)
level_ind += 1
return variant_list
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", required=True, type=str,
help="Path to output file")
args = parser.parse_args()
variant_list = get_ecdc_vocs()
with open(args.output, 'w') as fp:
fp.write(json.dumps(variant_list, indent=2))
if __name__ == "__main__":
main()
```
#### File: workflow_rsv_genbank_ingest/scripts/chunk_data.py
```python
import csv
import datetime
import gzip
import json
import multiprocessing as mp
import pandas as pd
import sys
from collections import defaultdict
from functools import partial
from pathlib import Path
csv.field_size_limit(sys.maxsize)
def write_sequences_day(fasta_out_path, seqs):
# Mode 'at' is append, in text mode
with gzip.open(fasta_out_path, "at") as fp_out:
for seq in seqs:
fp_out.write(">" + seq[0] + "\n" + seq[1] + "\n")
def chunk_data(data_feed, out_fasta, out_metadata, chunk_size=100000, processes=1):
"""Split up the data feed's individual objects into metadata and fasta files. Chunk the fasta files so that every day we only reprocess the subset of fasta files that have changed. The smaller the chunk size, the more efficient the updates, but the more files on the filesystem.
On a 48-core workstation with 128 GB RAM, aligning 200 sequences takes about 10 minutes, and this is more acceptable than having to align 1000 sequences, which takes ~1 hour. We end up with hundreds of files, but the filesystem seems to be handling it well.
Parameters
----------
data_feed: str
- Path to data feed csv file
out_fasta: str
- Path to fasta output directory
out_metadata: str
- Path to metadata.csv output file
chunk_size: int
- Number of records to hold in RAM before flushing to disk
processes: int
- Number of processes to spawn when writing to disk
Returns
-------
None
"""
output_path = Path(out_fasta)
# Make the output directory, if it hasn't been made yet
# Snakemake won't make the directory itself, since it's a special
# directory output
if not output_path.exists():
output_path.mkdir(exist_ok=True)
else:
# Erase all files in the output directory
for fasta_file in output_path.iterdir():
if fasta_file.is_file():
fasta_file.unlink()
# Keep track of how far we're along the current chunk
chunk_i = 0
# Store metadata entries as a list of dictionaries, for now
# We'll wrap it in a pandas DataFrame later for easier serialization
metadata_df = []
def flush_chunk(fasta_by_subm_date):
with mp.get_context("spawn").Pool(processes=processes) as pool:
for date, seqs in fasta_by_subm_date.items():
# Open the output fasta file for this date chunk
fasta_out_path = str(output_path / (date + ".fa.gz"))
pool.apply_async(partial(write_sequences_day, fasta_out_path, seqs))
pool.close()
pool.join()
with open(data_feed, "r", newline="") as fp_in:
# Open up the initial fasta file for the first chunk
fasta_by_subm_date = defaultdict(list)
line_counter = 0
feed_reader = csv.DictReader(fp_in, delimiter=",", quotechar='"')
for row in feed_reader:
# Flush results if chunk is full
if chunk_i == chunk_size:
flush_chunk(fasta_by_subm_date)
# Reset chunk counter
chunk_i = 0
# Reset sequence dictionary
fasta_by_subm_date = defaultdict(list)
# Add to metadata list
metadata_df.append({k: row[k] for k in row.keys() if k != "sequence"})
# Store sequence in dictionary
# Chop off the "Z" at the end of the submission time string, then parse
# as an ISO datetime format, then return just the year-month-day
subm_date = datetime.datetime.fromisoformat(row["submitted"][:-1]).strftime(
"%Y-%m-%d"
)
fasta_by_subm_date[subm_date].append(
(row["genbank_accession"], row["sequence"])
)
# Iterate the intra-chunk counter
chunk_i += 1
line_counter += 1
# Flush the last chunk
flush_chunk(fasta_by_subm_date)
# Cast the list of dictionaries (list of metadata entries) into a pandas
# DataFrame, and then serialize it to disk
# Do this step since pandas can handle some special serialization options
# that I didn't want to implement manually (such as wrapping certain strings
# in double quotes)
metadata_df = pd.DataFrame(metadata_df)
metadata_df.to_csv(out_metadata, index=False)
``` |
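Each record is routed to a per-day fasta chunk keyed by its submission date; the key is derived by stripping the trailing "Z" and reformatting the ISO timestamp:
```python
import datetime
submitted = "2021-05-01T12:30:45Z"  # example value of row["submitted"]
chunk_key = datetime.datetime.fromisoformat(submitted[:-1]).strftime("%Y-%m-%d")
print(chunk_key)  # 2021-05-01 -> appended to output/2021-05-01.fa.gz
```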
{
"source": "a1401358759/my_site",
"score": 2
} |
#### File: management/commands/update_data.py
```python
from django.core.management.base import BaseCommand
from article.models import Article
class Command(BaseCommand):
def update_data(self):
count = 0
articles = Article.objects.all()
for article in articles:
article.last_update = article.publish_time
article.save(update_fields=['last_update'])
count += 1
print('刷新完成,共更新%d条数据。' % count)
def handle(self, *args, **options):
self.update_data()
```
#### File: my_site/manager/forms.py
```python
from django import forms
from django.core.exceptions import ValidationError
from utils.dlibs.forms.validators import email_validator
from article.constants import EditorKind, BlogStatus, CarouselImgType
class SearchBlogForm(forms.Form):
title = forms.CharField(label='博客标题', required=False)
class LoginForm(forms.Form):
user_name = forms.CharField(label="用户名", max_length=128, widget=forms.TextInput(attrs={'class': 'form-control'}))
password = forms.CharField(label="密码", max_length=256, widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class AddFriendLinkForm(forms.Form):
edit_id = forms.IntegerField(label='link_id', required=False)
name = forms.CharField(label='网站名称')
link = forms.CharField(label='网站链接')
avatar = forms.CharField(label='网站图标', required=False)
desc = forms.CharField(label='网站描述')
class AddAuthorForm(forms.Form):
item_id = forms.IntegerField(required=False)
name = forms.CharField(label='作者姓名')
email = forms.CharField(label='作者邮箱', required=False, validators=[email_validator])
website = forms.CharField(label='个人网站', required=False)
class AddMusicForm(forms.Form):
name = forms.CharField(label='音乐名称')
url = forms.CharField(label='音乐地址')
cover = forms.CharField(label='音乐封面')
artist = forms.CharField(label='演唱者', required=False)
class AddCarouselForm(forms.Form):
name = forms.CharField(label='图片名称')
description = forms.CharField(label='图片描述')
path = forms.FileField(label='图片', required=False)
link = forms.CharField(label='图片外链', required=False)
img_type = forms.ChoiceField(label='图片类型', choices=CarouselImgType.CHOICES)
weights = forms.IntegerField(label='图片权重')
class OperateOwnMessageForm(forms.Form):
summary = forms.CharField(label='简介')
message = forms.CharField(label='寄语')
editor = forms.ChoiceField(label='编辑器类型', choices=EditorKind.CHOICES)
class OperateBlogForm(forms.Form):
title = forms.CharField(label='标题', max_length=100)
author = forms.IntegerField(label='作者')
classification = forms.IntegerField(label='分类')
content = forms.CharField(label='内容')
count = forms.IntegerField(label='阅读量', required=False)
editor = forms.ChoiceField(label='编辑器类型', choices=EditorKind.CHOICES)
status = forms.ChoiceField(label='状态', choices=BlogStatus.CHOICES)
class ChangePasswordForm(forms.Form):
old_password = forms.CharField(label='旧密码', max_length=128, widget=forms.PasswordInput())
new_password = forms.CharField(label='新密码', max_length=128, widget=forms.PasswordInput())
confirm_password = forms.CharField(label='确认新密码', required=True, max_length=128, widget=forms.PasswordInput())
def clean_confirm_password(self):
if self.cleaned_data['new_password'] != self.cleaned_data['confirm_password']:
raise ValidationError(message='请确认两次输入的新密码一致')
return self.cleaned_data['confirm_password']
class UpdateBlogStatusForm(forms.Form):
blog_id = forms.IntegerField(label='博客id')
status = forms.ChoiceField(label='状态', choices=BlogStatus.CHOICES)
```
#### File: dlibs/forms/base.py
```python
from django import forms
from utils.dlibs.forms.constants import FormConstants
class BaseAdminModelForm(forms.ModelForm):
    '''Base ModelForm class for the admin; every ModelForm used in the admin
    should inherit from this class directly.
    Extra features:
    1. Adds an is_update attribute: False when creating a new object, True when updating.
    '''
def __init__(self, *args, **kwargs):
self.is_update = kwargs.get('instance') is not None
super(BaseAdminModelForm, self).__init__(*args, **kwargs)
class BaseListForm(forms.Form):
"""用于获取列表的表单,包含page_num和page_size两个参数"""
page_num = forms.IntegerField(required=False)
page_size = forms.IntegerField(required=False)
def clean_page_num(self):
"""检查和处理页码"""
page_num = self.cleaned_data['page_num'] or 1
return page_num if page_num > 0 else 1
def clean_page_size(self):
"""检查和处理页面大小"""
page_size = self.cleaned_data['page_size'] or FormConstants.DEFAULT_PAGE_SIZE
return page_size if page_size > 0 else FormConstants.DEFAULT_PAGE_SIZE
```
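The two clean methods simply clamp missing or non-positive values to safe defaults. The same normalization, stripped of the Django form plumbing (the default of 20 is a placeholder for `FormConstants.DEFAULT_PAGE_SIZE`, whose real value is not shown here):
```python
DEFAULT_PAGE_SIZE = 20  # placeholder: the real value lives in FormConstants
def normalize_page(page_num, page_size):
    # mirrors clean_page_num / clean_page_size above
    page_num = page_num or 1
    page_size = page_size or DEFAULT_PAGE_SIZE
    return (page_num if page_num > 0 else 1,
            page_size if page_size > 0 else DEFAULT_PAGE_SIZE)
print(normalize_page(None, None))  # (1, 20)
print(normalize_page(-3, 0))       # (1, 20)
print(normalize_page(2, 50))       # (2, 50)
```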
#### File: dlibs/forms/validators.py
```python
import re
from django.core.validators import RegexValidator, EmailValidator
from django.core.exceptions import ValidationError
# --------------- Regular expressions ------------------#
# Mobile numbers: a leading T (in place of the 1) marks automated-test numbers,
# which pass validation but never receive an SMS
_white_num = r'[T1][3456789]\d{9}'
mobile_rex = r"^(?:\+?86[- ]?|0)?(%s)$" % _white_num
MOBILE_RE = re.compile(mobile_rex)
# SMS verification code: exactly 6 digits
VERIFY_CODE_RE = re.compile(r"^\d{6}$")
# Image captcha code: 4 characters
CAPTCHA_RE = re.compile(r"^\d{4}$")
# National ID card number
ID_NUM_RE = re.compile(r'^[1-9]\d{5}[1-9]\d{3}((0\d)|(1[0-2]))(([0|1|2]\d)|3[0-1])\d{3}([0-9]|X|x)$')
# --------------- validators ------------------#
mobile_validator = RegexValidator(MOBILE_RE, '手机号格式不正确', 'invalid')
email_validator = EmailValidator('请输入正确的email', 'invalid')
verify_code_validator = RegexValidator(VERIFY_CODE_RE, '短信验证码格式不正确', 'invalid')
captcha_validator = RegexValidator(CAPTCHA_RE, '图片验证码格式不正确', 'invalid')
id_num_validator = RegexValidator(ID_NUM_RE, '身份证号码错误', 'invalid')
# --------------- Validation helpers ------------------#
def is_valid_mobile(mobile):
    '''Check whether a mobile number is valid.'''
if not mobile:
return False
try:
mobile_validator(mobile)
return True
except ValidationError:
return False
```
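A quick check of the compiled mobile-number pattern, including the T-prefixed test numbers described in the comment:
```python
import re
_white_num = r'[T1][3456789]\d{9}'
MOBILE_RE = re.compile(r"^(?:\+?86[- ]?|0)?(%s)$" % _white_num)
print(bool(MOBILE_RE.match('13812345678')))      # True
print(bool(MOBILE_RE.match('+86 13812345678')))  # True
print(bool(MOBILE_RE.match('T3812345678')))      # True  -- test number, passes validation
print(bool(MOBILE_RE.match('12345678901')))      # False -- second digit must be 3-9
```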
#### File: libs/error/errorcode.py
```python
class StatusCode(object):
    '''Base class for status codes.
    Each status code has three attributes: code, msg and msgcn.
    All three can be read either as attributes (StatusCode.code)
    or with dict-style keys (StatusCode['code']).
    '''
def __init__(self, code, msg, msgcn=''):
        '''
        code -- numeric error code, int
        msg -- error message in English, str
        msgcn -- error message in Chinese, str
        '''
self._code = int(code)
self._msg = msg
self._msgcn = msgcn
def __str__(self):
return self._msg
def __int__(self):
return self._code
def __ne__(self, other):
if hasattr(other, 'code'):
return self._code != other.code
else:
try:
return self._code != int(other)
            except (TypeError, ValueError):
return self._code != other
def __eq__(self, other):
if hasattr(other, 'code'):
return self._code == other.code
else:
try:
return self._code == int(other)
            except (TypeError, ValueError):
return self._code == other
def __getitem__(self, key):
if key == 'code':
return self._code
elif key == 'msg':
return self._msg
elif key == 'msgcn':
return self._msgcn
else:
raise KeyError
def __setitem__(self, key, value):
if key == 'code':
self._code = value
elif key == 'msg':
self._msg = value
elif key == 'msgcn':
self._msgcn = value
else:
raise KeyError
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def msg(self):
return self._msg
@msg.setter
def msg(self, value):
self._msg = value
@property
def msgcn(self):
return self._msgcn
@msgcn.setter
def msgcn(self, value):
self._msgcn = value
class CommonError(object):
    '''
    Error codes. Each subsystem should inherit from this class and add its own
    business error codes.
    Rule: subsystem code (2 digits; 00 means codes shared by all subsystems)
    + error code (3 digits), 5 digits in total.
    Example:
    class ERROR(CommonError):
        ACCOUNT_BASE = 10000
        CUSTOM_ERROR = StatusCode(ACCOUNT_BASE + 1, 'Some error msg.', '一些错误信息.')
    '''
COMMON_BASE = 00000
SUCCESS = StatusCode(COMMON_BASE + 0, 'Successful.', '成功.')
UNKNOWN = StatusCode(COMMON_BASE + 1, 'Unknown error.', '未知错误.')
FAILED = StatusCode(COMMON_BASE + 2, 'Failed.', '失败.')
UPGRADING = StatusCode(COMMON_BASE + 3, 'System is upgrading.', '系统升级中.')
    SERVER_TOO_BUSY = StatusCode(COMMON_BASE + 4, 'Server is too busy.', '服务器繁忙')  # rate limiting
NOT_IMPLEMENTED = StatusCode(COMMON_BASE + 5, 'Not Implemented.', '功能尚未开放')
    IN_BLACKLIST = StatusCode(COMMON_BASE + 6, 'Illegal, Denial of service.', '黑名单中,禁止访问')  # blacklisted
SIGN_ERROR = StatusCode(COMMON_BASE + 7, 'Sign verification failed.', '签名不正确')
TIMESTAMP_EXPIRED = StatusCode(COMMON_BASE + 8, 'Timestamp expired.', '时间戳过期')
REQUEST_TOO_OFTEN = StatusCode(COMMON_BASE + 9, 'Request too often.', '请求太频繁')
PERMISSION_DENIED = StatusCode(COMMON_BASE + 10, 'Sorry, Permission Denied.', '权限不足')
PARAM_NOT_ENOUGH = StatusCode(COMMON_BASE + 11, 'Params not enough.', '参数不足')
PARAM_ERROR = StatusCode(COMMON_BASE + 12, 'Params error.', '参数错误')
NOT_FOUND = StatusCode(COMMON_BASE + 13, 'Not found.', '未找到')
NOT_LOGIN = StatusCode(COMMON_BASE + 14, 'Not login.', '未登录')
USER_DEACTIVE = StatusCode(COMMON_BASE + 15, 'User is deactive.', '用户被禁用')
WEIXIN_NOT_LOGIN = StatusCode(COMMON_BASE + 16, 'Weixin Not login.', '微信未登录')
SUPERUSER_PERMISSION_DENIED = StatusCode(COMMON_BASE + 17, 'Sorry,Superuser Permission Denied.', '超级管理员权限不足')
WALLET_NOT_ENOUGH = StatusCode(COMMON_BASE + 18, 'wallet not enough', '钱包余额不足')
```
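As the docstring notes, a `StatusCode` supports attribute access, dict-style access, and comparison against plain ints; a usage sketch (the import path is assumed from the repo layout):
```python
# Import path assumed from the repo layout (utils/libs/error/errorcode.py).
from utils.libs.error.errorcode import CommonError, StatusCode
err = CommonError.PARAM_ERROR
print(err.code, err.msg)    # 12 Params error.
print(err['msgcn'])         # 参数错误
print(err == 12, err != 0)  # True True -- compares directly against plain ints
class BlogError(CommonError):
    # hypothetical subsystem following the 2-digit base + 3-digit code rule
    BLOG_BASE = 20000
    DUPLICATE_TITLE = StatusCode(BLOG_BASE + 1, 'Duplicate title.', '标题重复.')
```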
#### File: libs/utils/mail.py
```python
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
from utils.libs.config import settings
def send_mail(subject, from_addr, to_addrs, content, text_type='plain', accept_language='zh-CN', accept_charset='ISO-8859-1,utf-8'):
    '''Send an HTML email to the given recipients.
    subject -- subject line
    from_addr -- sender address; note it cannot be arbitrary, since some mail providers
                 refuse to send mail whose From header does not match the real account
    to_addrs -- recipients; a list sends to several people, a string sends to one
    content -- message body as a string
    text_type -- subtype of text/* MIME documents: 'plain' for plain text, 'html' for HTML mail
    '''
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = from_addr
msg['To'] = ', '.join(to_addrs) if isinstance(to_addrs, list) else to_addrs
msg["Accept-Language"] = accept_language
msg["Accept-Charset"] = accept_charset
msg.attach(MIMEText(content, text_type, "utf-8"))
mail_conf = settings.MAIL_CONFIG
smtp = smtplib.SMTP_SSL(mail_conf['host'], mail_conf['port'], timeout=5)
smtp.login(mail_conf['user'], mail_conf['password'])
smtp.sendmail(from_addr, to_addrs, msg.as_string())
smtp.quit()
``` |
{
"source": "a1401358759/vmaig_blog",
"score": 2
} |
#### File: vmaig_blog/vmaig_auth/forms.py
```python
from django import forms
from vmaig_auth.models import VmaigUser
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.core.mail import send_mail
import base64
import logging
logger = logging.getLogger(__name__)
# Adapted from django.contrib.auth.forms.UserCreationForm
class VmaigUserCreationForm(forms.ModelForm):
    # Error messages
error_messages = {
'duplicate_username': u"此用户已存在.",
'password_mismatch': u"两次密码不相等.",
'duplicate_email': u'此email已经存在.'
}
    # 'invalid' is the message shown for an ill-formed username,
    # 'required' is the message shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"
}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
password1 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"密码未填"
}
)
password2 = forms.CharField(
widget=forms.PasswordInput,
error_messages={
'required': u"确认密码未填"
}
)
class Meta:
model = VmaigUser
fields = ("username", "email")
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
VmaigUser._default_manager.get(username=username)
except VmaigUser.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages["duplicate_username"]
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages["password_mismatch"]
)
return password2
def clean_email(self):
email = self.cleaned_data["email"]
        # Check whether a user with this email already exists
try:
VmaigUser._default_manager.get(email=email)
except VmaigUser.DoesNotExist:
return email
raise forms.ValidationError(
self.error_messages["duplicate_email"]
)
def save(self, commit=True):
user = super(VmaigUserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class VmaigPasswordRestForm(forms.Form):
    # Error messages
error_messages = {
'email_error': u"此用户不存在或者用户名与email不对应.",
}
    # 'invalid' is the message shown for an ill-formed username,
    # 'required' is the message shown when the field is left empty
username = forms.RegexField(
max_length=30,
regex=r'^[\w.@+-]+$',
error_messages={
'invalid': u"该值只能包含字母、数字和字符@/./+/-/_",
'required': u"用户名未填"}
)
email = forms.EmailField(
error_messages={
'invalid': u"email格式错误",
'required': u'email未填'}
)
def clean(self):
username = self.cleaned_data.get('username')
email = self.cleaned_data.get('email')
if username and email:
try:
self.user = VmaigUser.objects.get(
username=username, email=email, is_active=True
)
except VmaigUser.DoesNotExist:
raise forms.ValidationError(
self.error_messages["email_error"]
)
return self.cleaned_data
def save(self, from_email=None, request=None,
token_generator=default_token_generator):
email = self.cleaned_data['email']
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
uid = base64.urlsafe_b64encode(
force_bytes(self.user.pk)
).rstrip(b'\n=')
token = token_generator.make_token(self.user)
protocol = 'http'
title = u"重置 {} 的密码".format(site_name)
message = "".join([
u"你收到这封信是因为你请求重置你在网站 {} 上的账户密码\n\n".format(
site_name
),
u"请访问该页面并输入新密码:\n\n",
"{}://{}/resetpassword/{}/{}/\n\n".format(
protocol, domain, uid, token
),
u"你的用户名,如果已经忘记的话: {}\n\n".format(
self.user.username
),
u"感谢使用我们的站点!\n\n",
u"{} 团队\n\n\n".format(site_name)
])
try:
send_mail(title, message, from_email, [self.user.email])
except Exception as e:
logger.error(
u'[UserControl]用户重置密码邮件发送失败:[{}]/[{}]'.format(
                    self.user.username, email
)
)
```
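The reset link embeds the user's primary key as URL-safe base64 with padding stripped; the encoding step on its own, with a hypothetical pk:
```python
import base64
pk = 42  # hypothetical user primary key
uid = base64.urlsafe_b64encode(str(pk).encode()).rstrip(b'\n=')
print(uid)  # b'NDI' -- goes into /resetpassword/<uid>/<token>/
# The reset view would reverse it with
# base64.urlsafe_b64decode(uid + b'=' * (-len(uid) % 4))  -> b'42'
```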
#### File: vmaig_blog/vmaig_comments/models.py
```python
from django.db import models
from django.conf import settings
from blog.models import Article
# Create your models here.
# Used to change the app name shown in the admin: the admin renders app names with
# str.title(), so overriding title() on a str subclass is enough.
class string_with_title(str):
def __new__(cls, value, title):
instance = str.__new__(cls, value)
instance._title = title
return instance
def title(self):
return self._title
__copy__ = lambda self: self
__deepcopy__ = lambda self, memodict: self
class Comment(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=u'用户')
article = models.ForeignKey(Article, verbose_name=u'文章')
text = models.TextField(verbose_name=u'评论内容')
create_time = models.DateTimeField(u'创建时间', auto_now_add=True)
parent = models.ForeignKey('self', default=None, blank=True, null=True,
verbose_name=u'引用')
class Meta:
verbose_name_plural = verbose_name = u'评论'
ordering = ['-create_time']
app_label = string_with_title('vmaig_comments', u"评论管理")
def __unicode__(self):
return self.article.title + '_' + str(self.pk)
__str__ = __unicode__
``` |
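`string_with_title` works because the admin calls `.title()` on the app label when rendering section headers; with the class above in scope:
```python
label = string_with_title('vmaig_comments', u'评论管理')
print(label == 'vmaig_comments')  # True -- still equals the plain app label
print(label.title())              # 评论管理 -- what the admin section header shows
print('vmaig_comments'.title())   # Vmaig_Comments -- the default it replaces
```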
{
"source": "a140262/emr-stream-demo",
"score": 2
} |
#### File: lib/cdk_infra/eks_service_account.py
```python
from aws_cdk import (
core,
aws_iam as iam
)
from aws_cdk.aws_eks import ICluster
from lib.util.manifest_reader import *
import os
class EksSAConst(core.Construct):
def __init__(self, scope: core.Construct, id:str, eks_cluster: ICluster, **kwargs,) -> None:
super().__init__(scope, id, **kwargs)
source_dir=os.path.split(os.environ['VIRTUAL_ENV'])[0]+'/source'
# //************************************v*************************************************************//
# //***************************** SERVICE ACCOUNT, RBAC and IAM ROLES *******************************//
# //****** Associating IAM role to K8s Service Account to provide fine-grain security control ******//
# //***********************************************************************************************//
# Cluster Auto-scaler
self._scaler_sa = eks_cluster.add_service_account('AutoScalerSa',
name='cluster-autoscaler',
namespace='kube-system'
)
_scaler_role = load_yaml_local(source_dir+'/app_resources/autoscaler-iam-role.yaml')
for statmt in _scaler_role:
self._scaler_sa.add_to_principal_policy(iam.PolicyStatement.from_json(statmt))
# ALB Ingress
self._alb_sa = eks_cluster.add_service_account('ALBServiceAcct',
name='alb-aws-load-balancer-controller',
namespace='kube-system'
)
_alb_role = load_yaml_local(source_dir+'/app_resources/alb-iam-role.yaml')
for statmt in _alb_role:
self._alb_sa.add_to_principal_policy(iam.PolicyStatement.from_json(statmt))
```
#### File: lib/cdk_infra/spark_permission.py
```python
from aws_cdk import (
core,
aws_iam as iam,
aws_emrcontainers as emrc
)
from aws_cdk.aws_eks import ICluster, KubernetesManifest, AwsAuth
from lib.util.manifest_reader import load_yaml_replace_var_local
import os
class SparkOnEksConst(core.Construct):
@property
def EMRVC(self):
return self.emr_vc.attr_id
@property
def EMRFargateVC(self):
return self.emr_vc_fg.attr_id
@property
def EMRExecRole(self):
return self._emr_exec_role.role_arn
def __init__(self,scope: core.Construct, id: str,
eks_cluster: ICluster,
code_bucket: str,
awsAuth: AwsAuth,
**kwargs) -> None:
super().__init__(scope, id, **kwargs)
source_dir=os.path.split(os.environ['VIRTUAL_ENV'])[0]+'/source'
# //****************************************************************************************//
# //************************** SETUP PERMISSION FOR OSS SPARK JOBS *************************//
# //******* create k8s namespace, service account, and IAM role for service account ********//
# //***************************************************************************************//
# create k8s namespace
etl_ns = eks_cluster.add_manifest('SparkNamespace',{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": "spark",
"labels": {"name":"spark"}
}
}
)
self._spark_sa = eks_cluster.add_service_account('NativeSparkSa',
name='nativejob',
namespace='spark'
)
self._spark_sa.node.add_dependency(etl_ns)
_spark_rb = eks_cluster.add_manifest('sparkRoleBinding',
load_yaml_replace_var_local(source_dir+'/app_resources/native-spark-rbac.yaml',
fields= {
"{{MY_SA}}": self._spark_sa.service_account_name
})
)
_spark_rb.node.add_dependency(self._spark_sa)
_native_spark_iam = load_yaml_replace_var_local(source_dir+'/app_resources/native-spark-iam-role.yaml',
fields={
"{{codeBucket}}": code_bucket
}
)
for statmnt in _native_spark_iam:
self._spark_sa.add_to_principal_policy(iam.PolicyStatement.from_json(statmnt))
# # //*************************************************************************************//
# # //******************** SETUP PERMISSION FOR EMR ON EKS *****************************//
# # //***********************************************************************************//
#################################
####### #######
####### EMR Namespace #######
####### #######
#################################
_emr_01_name = "emr"
emr_ns = eks_cluster.add_manifest('EMRNamespace',{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": _emr_01_name,
"labels": {"name": _emr_01_name}
}
}
)
_emr_02_name = "emrserverless"
emr_serverless_ns = eks_cluster.add_manifest('EMRFargateNamespace',{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"name": _emr_02_name,
"labels": {"name": _emr_01_name}
}
}
)
###########################################
####### #######
####### k8s role for EMR on EKS #######
####### #######
###########################################
_emr_rb = KubernetesManifest(self,'EMRRoleBinding',
cluster=eks_cluster,
manifest=load_yaml_replace_var_local(source_dir+'/app_resources/emr-rbac.yaml',
fields= {
"{{NAMESPACE}}": _emr_01_name,
},
multi_resource=True)
)
_emr_rb.node.add_dependency(emr_ns)
_emr_fg_rb = KubernetesManifest(self,'EMRFargateRoleBinding',
cluster=eks_cluster,
manifest=load_yaml_replace_var_local(source_dir+'/app_resources/emr-rbac.yaml',
fields= {
"{{NAMESPACE}}": _emr_02_name
},
multi_resource=True)
)
_emr_fg_rb.node.add_dependency(emr_serverless_ns)
# Create EMR on EKS job executor role
#######################################
####### #######
####### EMR Execution Role #######
####### #######
#######################################
self._emr_exec_role = iam.Role(self, "EMRJobExecRole", assumed_by=iam.ServicePrincipal("eks.amazonaws.com"))
# trust policy
_eks_oidc_provider=eks_cluster.open_id_connect_provider
_eks_oidc_issuer=_eks_oidc_provider.open_id_connect_provider_issuer
sub_str_like = core.CfnJson(self, "ConditionJsonIssuer",
value={
f"{_eks_oidc_issuer}:sub": f"system:serviceaccount:{_emr_01_name}:emr-containers-sa-*-*-{core.Aws.ACCOUNT_ID}-*"
}
)
self._emr_exec_role.assume_role_policy.add_statements(
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["sts:AssumeRoleWithWebIdentity"],
principals=[iam.OpenIdConnectPrincipal(_eks_oidc_provider, conditions={"StringLike": sub_str_like})])
)
aud_str_like = core.CfnJson(self,"ConditionJsonAudEMR",
value={
f"{_eks_oidc_issuer}:aud": "sts.amazon.com"
}
)
self._emr_exec_role.assume_role_policy.add_statements(
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["sts:AssumeRoleWithWebIdentity"],
principals=[iam.OpenIdConnectPrincipal(_eks_oidc_provider, conditions={"StringEquals": aud_str_like})]
)
)
# custom policy
_emr_iam = load_yaml_replace_var_local(source_dir+'/app_resources/emr-iam-role.yaml',
fields={
"{{codeBucket}}": code_bucket
}
)
for statmnt in _emr_iam:
self._emr_exec_role.add_to_policy(iam.PolicyStatement.from_json(statmnt))
############################################
####### #######
####### EMR virtual Cluster Server #######
####### #######
############################################
self.emr_vc = emrc.CfnVirtualCluster(self,"EMRCluster",
container_provider=emrc.CfnVirtualCluster.ContainerProviderProperty(
id=eks_cluster.cluster_name,
info=emrc.CfnVirtualCluster.ContainerInfoProperty(eks_info=emrc.CfnVirtualCluster.EksInfoProperty(namespace=_emr_01_name)),
type="EKS"
),
name="EMROnEKS"
)
self.emr_vc.node.add_dependency(self._emr_exec_role)
self.emr_vc.node.add_dependency(_emr_rb)
self.emr_vc_fg = emrc.CfnVirtualCluster(self,"EMRServerlessCluster",
container_provider=emrc.CfnVirtualCluster.ContainerProviderProperty(
id=eks_cluster.cluster_name,
info=emrc.CfnVirtualCluster.ContainerInfoProperty(eks_info=emrc.CfnVirtualCluster.EksInfoProperty(namespace=_emr_02_name)),
type="EKS"
),
name="EMROnEKSFargate"
)
self.emr_vc_fg.node.add_dependency(self._emr_exec_role)
self.emr_vc_fg.node.add_dependency(_emr_fg_rb)
``` |
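The two `CfnJson` conditions implement IAM Roles for Service Accounts: the execution role only trusts web identities whose OIDC `sub` claim matches the service accounts EMR creates in the `emr` namespace. Rendered, the trust-policy statement looks roughly like the dict below (issuer URL and account id are placeholders):
```python
# Illustrative only: issuer URL and account id below are placeholders.
oidc_issuer = "oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE1234567890"
account_id = "111122223333"
trust_statement = {
    "Effect": "Allow",
    "Action": "sts:AssumeRoleWithWebIdentity",
    "Principal": {"Federated": f"arn:aws:iam::{account_id}:oidc-provider/{oidc_issuer}"},
    "Condition": {
        "StringLike": {
            f"{oidc_issuer}:sub": f"system:serviceaccount:emr:emr-containers-sa-*-*-{account_id}-*"
        }
    },
}
```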
{
"source": "a143753/AOJ",
"score": 3
} |
#### File: a143753/AOJ/0026.py
```python
import sys
b = [ list(0 for i in range(14)) for j in range(14)]
def s(b,x,y):
b[x][y] += 1
b[x+1][y] += 1
b[x-1][y] += 1
b[x][y+1] += 1
b[x][y-1] += 1
def m(b,x,y):
b[x-1][y-1] += 1
b[x ][y-1] += 1
b[x+1][y-1] += 1
b[x-1][y ] += 1
b[x ][y ] += 1
b[x+1][y ] += 1
b[x-1][y+1] += 1
b[x ][y+1] += 1
b[x+1][y+1] += 1
def l(b,x,y):
b[x ][y-2] += 1
b[x-1][y-1] += 1
b[x ][y-1] += 1
b[x+1][y-1] += 1
b[x-2][y ] += 1
b[x-1][y ] += 1
b[x ][y ] += 1
b[x+1][y ] += 1
b[x+2][y ] += 1
b[x-1][y+1] += 1
b[x ][y+1] += 1
b[x+1][y+1] += 1
b[x ][y+2] += 1
for line in sys.stdin:
d = list(map(int,line.split(",")))
c = d[2]
x = d[0] + 2
y = d[1] + 2
if c == 1:
s(b,x,y)
elif c == 2:
m(b,x,y)
elif c == 3:
l(b,x,y)
ttl = 0
num = 0
max = 0
for x in range(2,12):
for y in range(2,12):
ttl += 1
if b[x][y] == 0:
num += 1
if b[x][y] > max:
max = b[x][y]
print(num)
print(max)
```
#### File: a143753/AOJ/0085.py
```python
def act(t,m,l):
t = (t+m) % len(l);
del l[t-1]
return (t-1) % (len(l)+1)
while True:
i = raw_input().split()
n = int(i[0])
m = int(i[1])
if n == 0 and m == 0:
break
else:
l = range(1,n+1)
t = 0
for i in range(n-1):
tt = act(t,m,l);
t = tt;
print l[0]
```
#### File: a143753/AOJ/0163.py
```python
import math
t = [ [ None, 300, 500, 600, 700, 1350, 1650 ],
[ None, None, 350, 450, 600, 1150, 1500 ],
[ None, None, None, 250, 400, 1000, 1350 ],
[ None, None, None, None, 250, 850, 1300 ],
[ None, None, None, None, None, 600, 1150 ],
[ None, None, None, None, None, None, 500 ] ]
l = [ [ None, None, None, None, None, None, None ],
[ 6, None, None, None, None, None, None ],
[ 13, 7, None, None, None, None, None ],
[ 18, 12, 5, None, None, None, None ],
[ 23, 17, 10, 5, None, None, None ],
[ 43, 37, 30, 25, 20, None, None ],
[ 58, 52, 45, 40, 35, 15, None ] ]
def inside(h,m):
return h*60+m >= 17*60+30 and h*60+m <= 19*60+30
def discount(n):
return math.ceil(n / 2 / 50) * 50
while True:
d = int(input())
if d == 0:
break
[hd,md] = map(int,input().split())
a = int(input())
[ha,ma] = map(int,input().split())
tt = t[d-1][a-1]
ll = l[a-1][d-1]
if ( ( inside(hd,md) or inside(ha,ma) ) and (ll <= 40) ):
print(discount(tt))
else:
print(tt)
```
#### File: a143753/AOJ/0280.py
```python
def ans(n,s):
p = [[] for i in range(n)]
h = []
i = 0
while True:
if s == []:
break
c = s.pop(0)
if c == 'M':
if p[i] == []:
p[i] = [c]
else:
p[i].append(c)
elif c == 'L':
if p[i] == []:
p[i] = [c]
else:
p[i].append(c)
if h != []:
p[i] += h
h = []
elif c == 'S':
if h == []:
h = [c]
else:
h.append(c)
if p[i] != []:
h += p[i]
p[i] = []
i = (i+1) % n
pp = list(sorted(map(len,p)))
hh = len(h)
pp.append(hh)
return " ".join(map(str,pp))
while True:
n = int(input())
if n == 0:
break
s = list(input())
o = ans(n,s)
print(o)
```
#### File: a143753/AOJ/0355.py
```python
n = int(raw_input())
u = raw_input()
q = int(raw_input())
def cmd_set(q,u):
x = int(q[0])
y = int(q[1])
z = q[2]
h = u[:x-1]
t = u[y:]
zz= z*(y-x+1)
return h+zz+t
def cmd_comp(q, u):
a = int(q[0])
b = int(q[1])
c = int(q[2])
d = int(q[3])
s = u[a-1:b]
t = u[c-1:d]
if s < t:
print "s"
elif s > t:
print "t"
else:
print "e"
for i in range(q):
q = raw_input().split()
cmd = q[0]
q.pop(0)
if cmd == 'comp':
cmd_comp(q, u)
elif cmd == 'set':
u = cmd_set(q, u)
``` |
{
"source": "a1492dc/marketcap-spider",
"score": 3
} |
#### File: a1492dc/marketcap-spider/all-coinmarkcap.py
```python
import scrapy
class coin_item(scrapy.Item):
coin_url = scrapy.Field()
coin_name = scrapy.Field()
coin_code = scrapy.Field()
coin_markcap = scrapy.Field()
coin_price = scrapy.Field()
coin_volume24 = scrapy.Field()
class Spider(scrapy.Spider):
name = "all-coinmarkcap"
def start_requests(self):
urls = [
'https://coinmarketcap.com/all/views/all/',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
item = []
for coin in response.css("table#currencies-all tr"):
item = coin_item()
item["coin_url"] = coin.css('td span.currency-symbol a::attr(href)').extract(),
item["coin_name"] = coin.css('td a.currency-name-container::text').extract(),
item["coin_code"] = coin.css('td span.currency-symbol a::text').extract(),
item["coin_markcap"] = coin.css('td.market-cap::attr(data-usd)').extract(),
item["coin_price"] = coin.css('td a.price::attr(data-usd)').extract(),
item["coin_volume24"] = coin.css('td.circulating-supply a::attr(data-supply)').extract(),
yield item
``` |
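The CSS selectors can be exercised outside a crawl with `scrapy.Selector` on a saved HTML snippet; the markup below is a trimmed, hypothetical row:
```python
from scrapy import Selector
html = """
<table id="currencies-all"><tr>
  <td><span class="currency-symbol"><a href="/currencies/bitcoin/">BTC</a></span>
      <a class="currency-name-container" href="/currencies/bitcoin/">Bitcoin</a></td>
  <td class="market-cap" data-usd="1.0e12"></td>
</tr></table>
"""
row = Selector(text=html).css("table#currencies-all tr")[0]
print(row.css('td a.currency-name-container::text').extract())     # ['Bitcoin']
print(row.css('td span.currency-symbol a::attr(href)').extract())  # ['/currencies/bitcoin/']
print(row.css('td.market-cap::attr(data-usd)').extract())          # ['1.0e12']
```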
{
"source": "a1600012888/arXiv2020-RIFE",
"score": 2
} |
#### File: a1600012888/arXiv2020-RIFE/inference_mp4_4x_parallel.py
```python
import os
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.set_grad_enabled(False)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--video', dest='video', required=True)
parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
parser.add_argument('--fps', dest='fps', type=int, default=60)
parser.add_argument('--png', dest='png', action='store_true', help='whether to output png format outputs')
args = parser.parse_args()
from model.RIFE import Model
model = Model()
model.load_model('./train_log')
model.eval()
model.device()
videoCapture = cv2.VideoCapture(args.video)
fps = np.round(videoCapture.get(cv2.CAP_PROP_FPS))
success, frame = videoCapture.read()
h, w, _ = frame.shape
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
if args.png:
if not os.path.exists('output'):
os.mkdir('output')
else:
output = cv2.VideoWriter('{}_4x.mp4'.format(args.video[:-4]), fourcc, args.fps, (w, h))
cnt = 0
skip_frame = 1
def writeframe(I0, mid0, mid1, mid2, I1, p):
global cnt, skip_frame
for i in range(I0.shape[0]):
if p[i] > 0.2:
mid0[i] = I0[i]
mid1[i] = I0[i]
mid2[i] = I1[i]
if p[i] < 1e-3 and args.skip:
if skip_frame % 100 == 0:
print("Warning: Your video has {} static frames, skipping them may change the duration of the generated video.".format(skip_frame))
skip_frame += 1
continue
if args.png:
cv2.imwrite('output/{:0>7d}.png'.format(cnt), I0[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid0[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid1[i])
cnt += 1
cv2.imwrite('output/{:0>7d}.png'.format(cnt), mid2[i])
cnt += 1
else:
output.write(I0[i])
output.write(mid0[i])
output.write(mid1[i])
output.write(mid2[i])
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
print('{}.mp4, {} frames in total, {}FPS to {}FPS'.format(args.video[:-4], tot_frame, fps, args.fps))
pbar = tqdm(total=tot_frame)
img_list = []
img_list.append(frame)
while success:
success, frame = videoCapture.read()
if success:
img_list.append(frame)
if len(img_list) == 5 or (not success and len(img_list) > 1):
I0 = torch.from_numpy(np.transpose(img_list[:-1], (0, 3, 1, 2)).astype("float32") / 255.).to(device)
I1 = torch.from_numpy(np.transpose(img_list[1:], (0, 3, 1, 2)).astype("float32") / 255.).to(device)
p = (F.interpolate(I0, (16, 16), mode='bilinear', align_corners=False)
- F.interpolate(I1, (16, 16), mode='bilinear', align_corners=False)).abs()
I0 = F.pad(I0, padding)
I1 = F.pad(I1, padding)
mid1 = model.inference(I0, I1)
mid0 = model.inference(I0, mid1)
mid2 = model.inference(mid1, I1)
I0 = (((I0 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
I1 = (((I1 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid0 = (((mid0 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid1 = (((mid1 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
mid2 = (((mid2 * 255.).cpu().detach().numpy().transpose(0, 2, 3, 1))).astype('uint8')
writeframe(I0, mid0, mid1, mid2, I1, p.mean(3).mean(2).mean(1))
pbar.update(4)
img_list = img_list[-1:]
pbar.close()
if not args.png:
    output.release()
``` |
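The padding arithmetic rounds each frame up to the next multiple of 32 (presumably so the model's strided downsampling divides evenly), and `F.pad` with `(0, pw - w, 0, ph - h)` pads only the right and bottom edges; for a 720p frame:
```python
h, w = 720, 1280
ph = ((h - 1) // 32 + 1) * 32   # 736  -> 16 extra rows at the bottom
pw = ((w - 1) // 32 + 1) * 32   # 1280 -> already a multiple of 32, no padding
padding = (0, pw - w, 0, ph - h)
print(ph, pw, padding)          # 736 1280 (0, 0, 0, 16)
```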
{
"source": "a1600012888/mmdetection3d",
"score": 3
} |
#### File: core/visualizer/open3d_vis.py
```python
import copy
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
try:
import open3d as o3d
from open3d import geometry
except ImportError:
raise ImportError(
'Please run "pip install open3d" to install open3d first.')
def _draw_points(points,
vis,
points_size=2,
point_color=(0.5, 0.5, 0.5),
mode='xyz'):
"""Draw points on visualizer.
Args:
points (numpy.array | torch.tensor, shape=[N, 3+C]):
points to visualize.
vis (:obj:`open3d.visualization.Visualizer`): open3d visualizer.
points_size (int): the size of points to show on visualizer.
Default: 2.
point_color (tuple[float]): the color of points.
Default: (0.5, 0.5, 0.5).
mode (str): indicate type of the input points, avaliable mode
['xyz', 'xyzrgb']. Default: 'xyz'.
Returns:
tuple: points, color of each point.
"""
vis.get_render_option().point_size = points_size # set points size
if isinstance(points, torch.Tensor):
points = points.cpu().numpy()
points = points.copy()
pcd = geometry.PointCloud()
if mode == 'xyz':
pcd.points = o3d.utility.Vector3dVector(points[:, :3])
points_colors = np.tile(np.array(point_color), (points.shape[0], 1))
elif mode == 'xyzrgb':
pcd.points = o3d.utility.Vector3dVector(points[:, :3])
points_colors = points[:, 3:6]
# normalize to [0, 1] for open3d drawing
if not ((points_colors >= 0.0) & (points_colors <= 1.0)).all():
points_colors /= 255.0
else:
raise NotImplementedError
pcd.colors = o3d.utility.Vector3dVector(points_colors)
vis.add_geometry(pcd)
return pcd, points_colors
def _draw_bboxes(bbox3d,
vis,
points_colors,
pcd=None,
bbox_color=(0, 1, 0),
points_in_box_color=(1, 0, 0),
rot_axis=2,
center_mode='lidar_bottom',
mode='xyz'):
"""Draw bbox on visualizer and change the color of points inside bbox3d.
Args:
bbox3d (numpy.array | torch.tensor, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize.
vis (:obj:`open3d.visualization.Visualizer`): open3d visualizer.
points_colors (numpy.array): color of each points.
pcd (:obj:`open3d.geometry.PointCloud`): point cloud. Default: None.
bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
points_in_box_color (tuple[float]):
the color of points inside bbox3d. Default: (1, 0, 0).
rot_axis (int): rotation axis of bbox. Default: 2.
center_mode (bool): indicate the center of bbox is bottom center
or gravity center. avaliable mode
['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
mode (str): indicate type of the input points, avaliable mode
['xyz', 'xyzrgb']. Default: 'xyz'.
"""
if isinstance(bbox3d, torch.Tensor):
bbox3d = bbox3d.cpu().numpy()
bbox3d = bbox3d.copy()
in_box_color = np.array(points_in_box_color)
for i in range(len(bbox3d)):
center = bbox3d[i, 0:3]
dim = bbox3d[i, 3:6]
yaw = np.zeros(3)
yaw[rot_axis] = -bbox3d[i, 6]
rot_mat = geometry.get_rotation_matrix_from_xyz(yaw)
if center_mode == 'lidar_bottom':
center[rot_axis] += dim[
rot_axis] / 2 # bottom center to gravity center
elif center_mode == 'camera_bottom':
center[rot_axis] -= dim[
rot_axis] / 2 # bottom center to gravity center
box3d = geometry.OrientedBoundingBox(center, rot_mat, dim)
line_set = geometry.LineSet.create_from_oriented_bounding_box(box3d)
line_set.paint_uniform_color(bbox_color)
# draw bboxes on visualizer
vis.add_geometry(line_set)
# change the color of points which are in box
if pcd is not None and mode == 'xyz':
indices = box3d.get_point_indices_within_bounding_box(pcd.points)
points_colors[indices] = in_box_color
# update points colors
if pcd is not None:
pcd.colors = o3d.utility.Vector3dVector(points_colors)
vis.update_geometry(pcd)
def show_pts_boxes(points,
bbox3d=None,
show=True,
save_path=None,
points_size=2,
point_color=(0.5, 0.5, 0.5),
bbox_color=(0, 1, 0),
points_in_box_color=(1, 0, 0),
rot_axis=2,
center_mode='lidar_bottom',
mode='xyz'):
"""Draw bbox and points on visualizer.
Args:
points (numpy.array | torch.tensor, shape=[N, 3+C]):
points to visualize.
bbox3d (numpy.array | torch.tensor, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. Default: None.
show (bool): whether to show the visualization results. Default: True.
save_path (str): path to save visualized results. Default: None.
points_size (int): the size of points to show on visualizer.
Default: 2.
point_color (tuple[float]): the color of points.
Default: (0.5, 0.5, 0.5).
bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
points_in_box_color (tuple[float]):
the color of points which are in bbox3d. Default: (1, 0, 0).
rot_axis (int): rotation axis of bbox. Default: 2.
center_mode (bool): indicate the center of bbox is bottom center
or gravity center. avaliable mode
['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
mode (str): indicate type of the input points, avaliable mode
['xyz', 'xyzrgb']. Default: 'xyz'.
"""
# TODO: support score and class info
assert 0 <= rot_axis <= 2
# init visualizer
vis = o3d.visualization.Visualizer()
vis.create_window()
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[0, 0, 0]) # create coordinate frame
vis.add_geometry(mesh_frame)
# draw points
pcd, points_colors = _draw_points(points, vis, points_size, point_color,
mode)
# draw boxes
if bbox3d is not None:
_draw_bboxes(bbox3d, vis, points_colors, pcd, bbox_color,
points_in_box_color, rot_axis, center_mode, mode)
if show:
vis.run()
if save_path is not None:
vis.capture_screen_image(save_path)
vis.destroy_window()
def _draw_bboxes_ind(bbox3d,
vis,
indices,
points_colors,
pcd=None,
bbox_color=(0, 1, 0),
points_in_box_color=(1, 0, 0),
rot_axis=2,
center_mode='lidar_bottom',
mode='xyz'):
"""Draw bbox on visualizer and change the color or points inside bbox3d
with indices.
Args:
bbox3d (numpy.array | torch.tensor, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize.
vis (:obj:`open3d.visualization.Visualizer`): open3d visualizer.
indices (numpy.array | torch.tensor, shape=[N, M]):
indicate which bbox3d that each point lies in.
points_colors (numpy.array): color of each points.
pcd (:obj:`open3d.geometry.PointCloud`): point cloud. Default: None.
bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
points_in_box_color (tuple[float]):
the color of points which are in bbox3d. Default: (1, 0, 0).
rot_axis (int): rotation axis of bbox. Default: 2.
center_mode (bool): indicate the center of bbox is bottom center
or gravity center. avaliable mode
['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
mode (str): indicate type of the input points, avaliable mode
['xyz', 'xyzrgb']. Default: 'xyz'.
"""
if isinstance(bbox3d, torch.Tensor):
bbox3d = bbox3d.cpu().numpy()
if isinstance(indices, torch.Tensor):
indices = indices.cpu().numpy()
bbox3d = bbox3d.copy()
in_box_color = np.array(points_in_box_color)
for i in range(len(bbox3d)):
center = bbox3d[i, 0:3]
dim = bbox3d[i, 3:6]
yaw = np.zeros(3)
# TODO: fix problem of current coordinate system
# dim[0], dim[1] = dim[1], dim[0] # for current coordinate
# yaw[rot_axis] = -(bbox3d[i, 6] - 0.5 * np.pi)
yaw[rot_axis] = -bbox3d[i, 6]
rot_mat = geometry.get_rotation_matrix_from_xyz(yaw)
if center_mode == 'lidar_bottom':
center[rot_axis] += dim[
rot_axis] / 2 # bottom center to gravity center
elif center_mode == 'camera_bottom':
center[rot_axis] -= dim[
rot_axis] / 2 # bottom center to gravity center
box3d = geometry.OrientedBoundingBox(center, rot_mat, dim)
line_set = geometry.LineSet.create_from_oriented_bounding_box(box3d)
line_set.paint_uniform_color(bbox_color)
# draw bboxes on visualizer
vis.add_geometry(line_set)
# change the color of points which are in box
if pcd is not None and mode == 'xyz':
            points_colors[indices[:, i].astype(bool)] = in_box_color
# update points colors
if pcd is not None:
pcd.colors = o3d.utility.Vector3dVector(points_colors)
vis.update_geometry(pcd)
def show_pts_index_boxes(points,
bbox3d=None,
show=True,
indices=None,
save_path=None,
points_size=2,
point_color=(0.5, 0.5, 0.5),
bbox_color=(0, 1, 0),
points_in_box_color=(1, 0, 0),
rot_axis=2,
center_mode='lidar_bottom',
mode='xyz'):
"""Draw bbox and points on visualizer with indices that indicate which
bbox3d that each point lies in.
Args:
points (numpy.array | torch.tensor, shape=[N, 3+C]):
points to visualize.
bbox3d (numpy.array | torch.tensor, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. Default: None.
show (bool): whether to show the visualization results. Default: True.
indices (numpy.array | torch.tensor, shape=[N, M]):
indicate which bbox3d that each point lies in. Default: None.
save_path (str): path to save visualized results. Default: None.
points_size (int): the size of points to show on visualizer.
Default: 2.
point_color (tuple[float]): the color of points.
Default: (0.5, 0.5, 0.5).
bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
points_in_box_color (tuple[float]):
the color of points which are in bbox3d. Default: (1, 0, 0).
rot_axis (int): rotation axis of bbox. Default: 2.
        center_mode (str): indicate whether the center of bbox is the bottom
            center or the gravity center. Available modes:
            ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
        mode (str): indicate the type of the input points. Available modes:
            ['xyz', 'xyzrgb']. Default: 'xyz'.
"""
# TODO: support score and class info
assert 0 <= rot_axis <= 2
# init visualizer
vis = o3d.visualization.Visualizer()
vis.create_window()
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[0, 0, 0]) # create coordinate frame
vis.add_geometry(mesh_frame)
# draw points
pcd, points_colors = _draw_points(points, vis, points_size, point_color,
mode)
# draw boxes
if bbox3d is not None:
_draw_bboxes_ind(bbox3d, vis, indices, points_colors, pcd, bbox_color,
points_in_box_color, rot_axis, center_mode, mode)
if show:
vis.run()
if save_path is not None:
vis.capture_screen_image(save_path)
vis.destroy_window()
def project_pts_on_img(points,
raw_img,
lidar2img_rt,
max_distance=70,
thickness=-1):
"""Project the 3D points cloud on 2D image.
Args:
        points (numpy.array): 3D point cloud (x, y, z) to visualize.
raw_img (numpy.array): The numpy array of image.
lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
        max_distance (float): the max distance of the point cloud.
Default: 70.
thickness (int, optional): The thickness of 2D points. Default: -1.
"""
img = raw_img.copy()
num_points = points.shape[0]
pts_4d = np.concatenate([points[:, :3], np.ones((num_points, 1))], axis=-1)
pts_2d = pts_4d @ lidar2img_rt.T
# cam_points is Tensor of Nx4 whose last column is 1
# transform camera coordinate to image coordinate
pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=99999)
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
fov_inds = ((pts_2d[:, 0] < img.shape[1])
& (pts_2d[:, 0] >= 0)
& (pts_2d[:, 1] < img.shape[0])
& (pts_2d[:, 1] >= 0))
imgfov_pts_2d = pts_2d[fov_inds, :3] # u, v, d
cmap = plt.cm.get_cmap('hsv', 256)
cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255
for i in range(imgfov_pts_2d.shape[0]):
depth = imgfov_pts_2d[i, 2]
color = cmap[np.clip(int(max_distance * 10 / depth), 0, 255), :]
cv2.circle(
img,
center=(int(np.round(imgfov_pts_2d[i, 0])),
int(np.round(imgfov_pts_2d[i, 1]))),
radius=1,
color=tuple(color),
thickness=thickness,
)
cv2.imshow('project_pts_img', img.astype(np.uint8))
cv2.waitKey(100)
def project_bbox3d_on_img(bboxes3d,
raw_img,
lidar2img_rt,
color=(0, 255, 0),
thickness=1):
"""Project the 3D bbox on 2D image.
Args:
bboxes3d (numpy.array, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize.
raw_img (numpy.array): The numpy array of image.
lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
color (tuple[int]): the color to draw bboxes. Default: (0, 255, 0).
thickness (int, optional): The thickness of bboxes. Default: 1.
"""
img = raw_img.copy()
corners_3d = bboxes3d.corners
num_bbox = corners_3d.shape[0]
pts_4d = np.concatenate(
[corners_3d.reshape(-1, 3),
np.ones((num_bbox * 8, 1))], axis=-1)
pts_2d = pts_4d @ lidar2img_rt.T
pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5)
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
imgfov_pts_2d = pts_2d[..., :2].reshape(num_bbox, 8, 2)
line_indices = ((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (3, 2), (3, 7),
(4, 5), (4, 7), (2, 6), (5, 6), (6, 7))
for i in range(num_bbox):
        corners = imgfov_pts_2d[i].astype(int)
for start, end in line_indices:
cv2.line(img, (corners[start, 0], corners[start, 1]),
(corners[end, 0], corners[end, 1]), color, thickness,
cv2.LINE_AA)
cv2.imshow('project_bbox3d_img', img.astype(np.uint8))
cv2.waitKey(0)
def draw_lidar_bbox3d_on_img(bboxes3d,
raw_img,
lidar2img_rt,
img_metas,
color=(0, 255, 0),
thickness=1):
"""Project the 3D bbox on 2D plane and draw on input image.
Args:
bboxes3d (numpy.array, shape=[M, 7]):
3d bbox (x, y, z, dx, dy, dz, yaw) to visualize.
raw_img (numpy.array): The numpy array of image.
lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
img_metas (dict): Useless here.
color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0).
thickness (int, optional): The thickness of bboxes. Default: 1.
"""
img = raw_img.copy()
corners_3d = bboxes3d.corners
num_bbox = corners_3d.shape[0]
pts_4d = np.concatenate(
[corners_3d.reshape(-1, 3),
np.ones((num_bbox * 8, 1))], axis=-1)
lidar2img_rt = copy.deepcopy(lidar2img_rt).reshape(4, 4)
if isinstance(lidar2img_rt, torch.Tensor):
lidar2img_rt = lidar2img_rt.cpu().numpy()
pts_2d = pts_4d @ lidar2img_rt.T
pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5)
pts_2d[:, 0] /= pts_2d[:, 2]
pts_2d[:, 1] /= pts_2d[:, 2]
imgfov_pts_2d = pts_2d[..., :2].reshape(num_bbox, 8, 2)
line_indices = ((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (3, 2), (3, 7),
(4, 5), (4, 7), (2, 6), (5, 6), (6, 7))
for i in range(num_bbox):
        corners = imgfov_pts_2d[i].astype(int)
for start, end in line_indices:
cv2.line(img, (corners[start, 0], corners[start, 1]),
(corners[end, 0], corners[end, 1]), color, thickness,
cv2.LINE_AA)
return img.astype(np.uint8)
def draw_depth_bbox3d_on_img(bboxes3d,
raw_img,
calibs,
img_metas,
color=(0, 255, 0),
thickness=1):
"""Project the 3D bbox on 2D plane and draw on input image.
Args:
bboxes3d (numpy.array, shape=[M, 7]):
3d camera bbox (x, y, z, dx, dy, dz, yaw) to visualize.
raw_img (numpy.array): The numpy array of image.
calibs (dict): Camera calibration information, Rt and K.
img_metas (dict): Used in coordinates transformation.
color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0).
thickness (int, optional): The thickness of bboxes. Default: 1.
"""
from mmdet3d.core import Coord3DMode
from mmdet3d.core.bbox import points_cam2img
from mmdet3d.models import apply_3d_transformation
img = raw_img.copy()
calibs = copy.deepcopy(calibs)
img_metas = copy.deepcopy(img_metas)
corners_3d = bboxes3d.corners
num_bbox = corners_3d.shape[0]
points_3d = corners_3d.reshape(-1, 3)
assert ('Rt' in calibs.keys() and 'K' in calibs.keys()), \
        'Rt and K matrix should be provided as camera calibration information'
if not isinstance(calibs['Rt'], torch.Tensor):
calibs['Rt'] = torch.from_numpy(np.array(calibs['Rt']))
if not isinstance(calibs['K'], torch.Tensor):
calibs['K'] = torch.from_numpy(np.array(calibs['K']))
calibs['Rt'] = calibs['Rt'].reshape(3, 3).float().cpu()
calibs['K'] = calibs['K'].reshape(3, 3).float().cpu()
# first reverse the data transformations
xyz_depth = apply_3d_transformation(
points_3d, 'DEPTH', img_metas, reverse=True)
# then convert from depth coords to camera coords
xyz_cam = Coord3DMode.convert_point(
xyz_depth, Coord3DMode.DEPTH, Coord3DMode.CAM, rt_mat=calibs['Rt'])
# project to 2d to get image coords (uv)
uv_origin = points_cam2img(xyz_cam, calibs['K'])
uv_origin = (uv_origin - 1).round()
imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy()
line_indices = ((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (3, 2), (3, 7),
(4, 5), (4, 7), (2, 6), (5, 6), (6, 7))
for i in range(num_bbox):
        corners = imgfov_pts_2d[i].astype(int)
for start, end in line_indices:
cv2.line(img, (corners[start, 0], corners[start, 1]),
(corners[end, 0], corners[end, 1]), color, thickness,
cv2.LINE_AA)
return img.astype(np.uint8)
class Visualizer(object):
r"""Online visualizer implemented with Open3d.
Args:
        points (numpy.array, shape=[N, 3+C]): Points to visualize. The point
            cloud is in mode of Coord3DMode.DEPTH (please refer to
            core.structures.coord_3d_mode).
bbox3d (numpy.array, shape=[M, 7]): 3d bbox (x, y, z, dx, dy, dz, yaw)
to visualize. The 3d bbox is in mode of Box3DMode.DEPTH with
gravity_center (please refer to core.structures.box_3d_mode).
Default: None.
save_path (str): path to save visualized results. Default: None.
points_size (int): the size of points to show on visualizer.
Default: 2.
point_color (tuple[float]): the color of points.
Default: (0.5, 0.5, 0.5).
bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
points_in_box_color (tuple[float]):
the color of points which are in bbox3d. Default: (1, 0, 0).
rot_axis (int): rotation axis of bbox. Default: 2.
        center_mode (str): indicate whether the center of bbox is the bottom
            center or the gravity center. Available modes:
            ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
        mode (str): indicate the type of the input points. Available modes:
            ['xyz', 'xyzrgb']. Default: 'xyz'.
"""
def __init__(self,
points,
bbox3d=None,
save_path=None,
points_size=2,
point_color=(0.5, 0.5, 0.5),
bbox_color=(0, 1, 0),
points_in_box_color=(1, 0, 0),
rot_axis=2,
center_mode='lidar_bottom',
mode='xyz'):
super(Visualizer, self).__init__()
assert 0 <= rot_axis <= 2
# init visualizer
self.o3d_visualizer = o3d.visualization.Visualizer()
self.o3d_visualizer.create_window()
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[0, 0, 0]) # create coordinate frame
self.o3d_visualizer.add_geometry(mesh_frame)
self.points_size = points_size
self.point_color = point_color
self.bbox_color = bbox_color
self.points_in_box_color = points_in_box_color
self.rot_axis = rot_axis
self.center_mode = center_mode
self.mode = mode
self.seg_num = 0
# draw points
if points is not None:
self.pcd, self.points_colors = _draw_points(
points, self.o3d_visualizer, points_size, point_color, mode)
# draw boxes
if bbox3d is not None:
_draw_bboxes(bbox3d, self.o3d_visualizer, self.points_colors,
self.pcd, bbox_color, points_in_box_color, rot_axis,
center_mode, mode)
def add_bboxes(self, bbox3d, bbox_color=None, points_in_box_color=None):
"""Add bounding box to visualizer.
Args:
bbox3d (numpy.array, shape=[M, 7]):
3D bbox (x, y, z, dx, dy, dz, yaw) to be visualized.
The 3d bbox is in mode of Box3DMode.DEPTH with
gravity_center (please refer to core.structures.box_3d_mode).
            bbox_color (tuple[float]): the color of bbox. Default: None.
            points_in_box_color (tuple[float]): the color of points which
                are in bbox3d. Default: None.
"""
if bbox_color is None:
bbox_color = self.bbox_color
if points_in_box_color is None:
points_in_box_color = self.points_in_box_color
_draw_bboxes(bbox3d, self.o3d_visualizer, self.points_colors, self.pcd,
bbox_color, points_in_box_color, self.rot_axis,
self.center_mode, self.mode)
def add_seg_mask(self, seg_mask_colors):
"""Add segmentation mask to visualizer via per-point colorization.
Args:
seg_mask_colors (numpy.array, shape=[N, 6]):
The segmentation mask whose first 3 dims are point coordinates
and last 3 dims are converted colors.
"""
# we can't draw the colors on existing points
# in case gt and pred mask would overlap
# instead we set a large offset along x-axis for each seg mask
self.seg_num += 1
offset = (np.array(self.pcd.points).max(0) -
np.array(self.pcd.points).min(0))[0] * 1.2 * self.seg_num
mesh_frame = geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[offset, 0, 0]) # create coordinate frame for seg
self.o3d_visualizer.add_geometry(mesh_frame)
seg_points = copy.deepcopy(seg_mask_colors)
seg_points[:, 0] += offset
_draw_points(
seg_points, self.o3d_visualizer, self.points_size, mode='xyzrgb')
def show(self, save_path=None):
"""Visualize the points cloud.
Args:
save_path (str): path to save image. Default: None.
"""
self.o3d_visualizer.run()
if save_path is not None:
self.o3d_visualizer.capture_screen_image(save_path)
self.o3d_visualizer.destroy_window()
return
```
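The Open3D helpers above follow a build-add-show pattern: construct a `Visualizer`, add extra geometries, then render. Below is a minimal usage sketch that is not part of the original file; it assumes `open3d` is installed and uses randomly generated points and an illustrative box, so treat the values as placeholders.
```python
import numpy as np
# Hypothetical usage of the Visualizer class defined above.
points = np.random.rand(1000, 3) * 10  # dummy [N, 3] point cloud in DEPTH mode
boxes = np.array([[5.0, 5.0, 0.5, 2.0, 1.0, 1.0, 0.3]])  # [M, 7]: x, y, z, dx, dy, dz, yaw
vis = Visualizer(points, bbox3d=boxes, bbox_color=(0, 1, 0))
vis.add_bboxes(boxes + 1.0, bbox_color=(1, 0, 0))  # overlay a second, shifted set of boxes
vis.show(save_path='vis.png')  # opens an interactive window and saves a screenshot
```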
#### File: plugin/packnet/utils.py
```python
import torch
import torch.nn as nn
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU(out_planes)
)
class DepthPredictHead2Up(nn.Module):
'''
1. We use a softplus activation to generate positive depths.
The predicted depth is no longer bounded.
2. The network predicts depth rather than disparity, and at a single scale.
'''
def __init__(self, in_channels):
super(DepthPredictHead2Up, self).__init__()
self.up = nn.PixelShuffle(2)
self.conv1 = conv(in_channels//4, in_channels//4, kernel_size=3)
self.conv2 = conv(in_channels//16, in_channels//16, kernel_size=3)
self.conv3 = conv(in_channels//64, in_channels//64, kernel_size=3)
self.conv4 = conv(in_channels//64, in_channels//64, kernel_size=3)
self.conv5 = conv(in_channels//64, 1, kernel_size=1, padding=0)
def forward(self, x):
x = self.up(x)
x = self.conv1(x)
x = self.up(x)
x = self.conv2(x)
x = self.up(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
pred = nn.functional.softplus(x)
return pred
def get_depth_metrics(pred, gt, mask=None):
"""
params:
pred: [N,1,H,W]. torch.Tensor
gt: [N,1,H,W]. torch.Tensor
"""
if mask is not None:
num = torch.sum(mask) # the number of non-zeros
pred = pred[mask]
gt = gt[mask]
else:
num = pred.numel()
num = num * 1.0
diff_i = gt - pred
abs_diff = torch.sum(torch.abs(diff_i)) / num
abs_rel = torch.sum(torch.abs(diff_i) / gt) / num
sq_rel = torch.sum(diff_i ** 2 / gt) / num
rmse = torch.sqrt(torch.sum(diff_i ** 2) / num)
rmse_log = torch.sqrt(torch.sum((torch.log(gt) -
torch.log(pred)) ** 2) / num)
return abs_diff, abs_rel, sq_rel, rmse, rmse_log
```
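A short, hedged usage sketch of the head and metric helper above (not from the original file): `in_channels` must be divisible by 64 because the three `PixelShuffle(2)` stages each cut the channel count by 4, and all tensors below are random placeholders.
```python
import torch
# Hypothetical usage of DepthPredictHead2Up and get_depth_metrics defined above.
head = DepthPredictHead2Up(in_channels=256)
feat = torch.randn(2, 256, 16, 20)      # dummy backbone feature map
pred = head(feat)                        # [2, 1, 128, 160]; softplus keeps depths positive
gt = torch.rand_like(pred) * 50 + 1.0    # dummy ground-truth depth, strictly positive
mask = gt > 1.0                          # evaluate only on "valid" pixels
abs_diff, abs_rel, sq_rel, rmse, rmse_log = get_depth_metrics(pred, gt, mask=mask)
print(f'abs_rel={abs_rel.item():.3f}, rmse={rmse.item():.3f}')
```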
#### File: plugin/radar/pipelines.py
```python
from tools.surrdet.get_scene_flow_v2 import points_to_ann_cord
from IPython.terminal.embed import embed
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from typing import Tuple, List, Union
import numpy as np
import torch
import mmcv
@PIPELINES.register_module()
class LoadDepthImages(object):
_render_types = ['naive']
def __init__(self, img_size=(480, 892), render_type='naive'):
'''
        img_size: List or Tuple with two elements: h, w
'''
assert render_type in self._render_types, 'render_type:{} is not supported!'.format(render_type)
self.render_type = render_type
self.img_size = img_size # ()
self.ylim, self.xlim = img_size
def sort_points(self, points):
'''
        sort the points according to their depth in descending order
'''
depth = points[:, 2]
idx = np.argsort(depth) # ascending order
idx = idx[::-1]
new_points = points[idx]
return new_points
def naive_depth_render(self, points, depth_map):
'''
for float cord, use its int version
'''
points = self.sort_points(points)
x_cords = points[:, 0] * self.xlim / 1600.0
y_cords = points[:, 1] * self.ylim / 900.0
depth = points[:, 2]
depth = np.clip(depth, a_min=1e-5, a_max=99999)
#print('debug', depth[:10].mean(), depth[10:100].mean(), depth[-100:].mean())
#print('debug', x_cords.max(), y_cords.max(), depth_map.shape)
        x_cords = x_cords.astype(int)
        y_cords = y_cords.astype(int)
# first y, then x
#print(depth_map.shape, )
depth_map[y_cords, x_cords] = points[:,2]
return depth_map
def __call__(self, results):
npy_file_paths = results['npy_info']['depth_paths']
i = 0
results['seg_fields'] = []
for npy_file_path in npy_file_paths:
if npy_file_path is None:
depth_map = np.zeros((*self.img_size, 1))
results['seg_fields'].append('depth_map{}'.format(i))
results['depth_map{}'.format(i)] = depth_map
i += 1
continue
points = np.load(npy_file_path) # of shape [N, 3]: x, y, depth
depth_map = np.zeros(self.img_size)
if depth_map.ndim == 2:
#depth_map = depth_map[:, :, np.newaxis]
pass
if self.render_type == 'naive':
depth_map = self.naive_depth_render(points, depth_map)
results['seg_fields'].append('depth_map{}'.format(i))
results['depth_map{}'.format(i)] = depth_map.astype(np.float32)
i += 1
#depth_map[:,:, np.newaxis]
#print('debbug', results['depth_map'].shape, results['img'].shape, type(results['img']), '\n')
return results
@PIPELINES.register_module()
class LoadSceneFlows(object):
_render_types = ['naive']
def __init__(self, img_size=(480, 892), render_type='naive'):
'''
        img_size: List or Tuple with two elements: h, w
'''
assert render_type in self._render_types, 'render_type:{} is not supported!'.format(render_type)
self.render_type = render_type
self.img_size = img_size # ()
self.ylim, self.xlim = img_size
def sort_points(self, points):
'''
        sort the points according to their depth in descending order
'''
depth = points[:, 2]
idx = np.argsort(depth) # ascending order
idx = idx[::-1]
new_points = points[idx]
return new_points
def naive_sceneflow_render(self, points, sf_map, valid_mask):
'''
for float cord, use its int version
'''
points = points.transpose()
points = self.sort_points(points)
# filter out noisy static points
scene_flow = points[:, 3:]
speed = np.linalg.norm(scene_flow, axis=-1)
moving_mask = (speed > 0.2)
static_points = points[np.logical_not(moving_mask)]
moving_points = points[moving_mask]
static_points[:, 3:] = static_points[:, 3:] * 0
points = np.concatenate([moving_points, static_points], axis=0)
x_cords = points[:, 0] * self.xlim / 1600.0
y_cords = points[:, 1] * self.ylim / 900.0
        x_cords = x_cords.astype(int)
        y_cords = y_cords.astype(int)
scene_flow = np.clip(points[:, 3:], a_min=-100, a_max=100)
sf_map[y_cords, x_cords, :] = scene_flow
valid_mask[y_cords, x_cords] = 1
sf_map = np.concatenate([sf_map, valid_mask], axis=-1)
return sf_map
def __call__(self, results):
npy_file_paths = results['npy_info']['sf_paths']
i = 0
for npy_file_path in npy_file_paths:
if npy_file_path is None:
sf_map = np.zeros((*self.img_size, 4))
results['seg_fields'].append('sf_map{}'.format(i))
results['sf_map{}'.format(i)] = sf_map
i += 1
continue
points = np.load(npy_file_path) # of shape [N, 3]: x, y, depth
sf_map = np.zeros((*self.img_size, 3))
valid_mask = np.zeros((*self.img_size, 1))
if self.render_type == 'naive':
sf_map = self.naive_sceneflow_render(points, sf_map, valid_mask) # [H,W,4]
# 900x1600 => 1600x900
# depth_map = depth_map.transpose(1,0)
#results['seg_fields'] = torch.tensor(depth_map)
results['seg_fields'].append('sf_map{}'.format(i))
results['sf_map{}'.format(i)] = sf_map
i += 1
return results
@PIPELINES.register_module()
class ResizeDepthImage(object):
def __init__(self, scale:float = 1/4, interpolation='bilinear'):
self.scale = scale
self.interpolation = interpolation
def __call__(self, results):
depth_map = results['depth_map'] #
new_depth_map = mmcv.imrescale(depth_map, self.scale, interpolation=self.interpolation)
results['depth_map'] = new_depth_map
return results
@PIPELINES.register_module()
class LoadImageFromFiles(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
filenames = results['img_info']['filenames']
i = 0
results['img_fields'] = []
for filename in filenames:
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
if self.to_float32:
img = img.astype(np.float32)
results['img{}'.format(i)] = img
results['img_fields'].append('img{}'.format(i))
i += 1
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
```
#### File: plugin/radar/utils.py
```python
import torch
import torch.nn as nn
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU(out_planes)
)
class DepthPredictHead2Up(nn.Module):
'''
1. We use a softplus activation to generate positive depths.
The predicted depth is no longer bounded.
2. The network predicts depth rather than disparity, and at a single scale.
'''
def __init__(self, in_channels):
super(DepthPredictHead2Up, self).__init__()
self.up = nn.PixelShuffle(2)
self.conv1 = conv(in_channels//4, in_channels//4, kernel_size=3)
self.conv2 = conv(in_channels//16, in_channels//16, kernel_size=3)
self.conv3 = conv(in_channels//64, in_channels//64, kernel_size=3)
self.conv4 = conv(in_channels//64, in_channels//64, kernel_size=3)
self.conv5 = conv(in_channels//64, 1, kernel_size=1, padding=0)
def forward(self, x):
x = self.up(x)
x = self.conv1(x)
x = self.up(x)
x = self.conv2(x)
x = self.up(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
pred = nn.functional.softplus(x)
return pred
def get_depth_metrics(pred, gt, mask=None, scale=False):
"""
params:
pred: [N,1,H,W]. torch.Tensor
gt: [N,1,H,W]. torch.Tensor
        scale: bool. If True, scale the predicted depth using the median ratio (typically used for evaluating unsupervised depth)
"""
if mask is not None:
num = torch.sum(mask) # the number of non-zeros
pred = pred[mask]
gt = gt[mask]
else:
num = pred.numel()
if scale:
ratio = torch.median(gt) / (torch.median(pred) + 1e-4)
pred = pred * ratio
else:
ratio = torch.median(gt) / (torch.median(pred) + 1e-4)
num = num * 1.0
diff_i = gt - pred
abs_diff = torch.sum(torch.abs(diff_i)) / num
abs_rel = torch.sum(torch.abs(diff_i) / gt) / num
sq_rel = torch.sum(diff_i ** 2 / gt) / num
rmse = torch.sqrt(torch.sum(diff_i ** 2) / num)
rmse_log = torch.sqrt(torch.sum((torch.log(gt) -
torch.log(pred)) ** 2) / num)
return abs_diff, abs_rel, sq_rel, rmse, rmse_log, torch.tensor(ratio)
def get_motion_metrics(pred, gt, scale=False):
'''
pred: [N, 3, H, W]
gt: [N, 4, H, W] (last channel as mask)
'''
mask = gt[:, -1, :, :].type(torch.bool)
num = torch.sum(mask) # the number of non-zeros
if num < 1:
return [torch.zeros(1), torch.zeros(1), torch.zeros(1), ]
pred = torch.stack([pred[:, 0, ...][mask],
pred[:, 1, ...][mask],
pred[:, 2, ...][mask]], dim=1)
gt = torch.stack([gt[:, 0, ...][mask],
gt[:, 1, ...][mask],
gt[:, 2, ...][mask]], dim=1)
#print(pred.shape)
pred_speed = (torch.sum(pred ** 2, dim=1) + 1e-6) ** 0.5
gt_speed = (torch.sum(gt ** 2, dim=1) + 1e-6) ** 0.5
if scale:
ratio = torch.median(gt_speed) / (torch.median(pred_speed) + 1e-4)
pred = pred * ratio
else:
ratio = torch.median(gt_speed) / (torch.median(pred_speed) + 1e-4)
num = num * 1.0
diff_i = gt - pred
epe_map = (diff_i) ** 2
#print(epe_map.shape)
epe_map = (epe_map.sum(dim=-1, ) + 1e-6) ** 0.5
#print(epe_map.shape, 'after')
epe = torch.mean(epe_map)
epe_rel = torch.sum(epe_map / gt_speed) / num
return [epe, epe_rel, torch.tensor(ratio)]
def flow2rgb(flow_map_np):
'''
flow_map_np: [H, W, 2/3]
    originally used for optical flow visualization
'''
h, w, _ = flow_map_np.shape
rgb_map = np.ones((h, w, 3)).astype(np.float32)
normalized_flow_map = flow_map_np / (np.abs(flow_map_np).max())
rgb_map[:, :, 0] += normalized_flow_map[:, :, 0]
rgb_map[:, :, 1] -= 0.5 * (normalized_flow_map[:, :, 0] + normalized_flow_map[:, :, 1])
rgb_map[:, :, 2] += normalized_flow_map[:, :, 2]
return rgb_map.clip(0, 1)
def remap_invdepth_color(disp):
'''
disp: torch.Tensor [1, H, W]
'''
disp_np = disp.squeeze().cpu().numpy()
vmax = np.percentile(disp_np, 95)
normalizer = mpl.colors.Normalize(vmin=disp_np.min(), vmax=vmax)
#mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
mapper = cm.ScalarMappable(norm=normalizer, cmap='plasma')
# colormapped_im = (mapper.to_rgba(disp_np)[:, :, :3] * 255).astype(np.uint8)
# im = pil.fromarray(colormapped_im)
# shape [H, W, 3]
colormapped_im = (mapper.to_rgba(disp_np)[:, :, :3])
return colormapped_im
def disp2rgb(disp):
"""
disp: torch.Tensor [N, 1, H, W]
"""
all_rgbs = []
for i in range(disp.shape[0]):
t_disp = disp[i]
t_rgb = remap_invdepth_color(t_disp) # [H, W, 3]
all_rgbs.append(t_rgb)
ret_rgb = np.stack(all_rgbs, axis=0) # [N, H, W, 3]
return ret_rgb
def _gradient_x(img):
return img[:, :, :-1, :] - img[:, :, 1:, :]
def _gradient_y(img):
return img[:, :, :, :-1] - img[:, :, :, 1:]
def get_smooth_loss(preds, img):
'''
egde guided smoothing loss
preds: shape [N, 1/K, H, W]
img: shape [N, C, H, W]
'''
loss = 0
B, _, H, W = img.shape
# [N, 1, H, W]
weights_x = torch.exp(-torch.mean(abs(_gradient_x(img)), dim=1))
weights_y = torch.exp(-torch.mean(abs(_gradient_y(img)), dim=1))
if isinstance(preds, list):
for pred in preds:
up_pred = nn.functional.interpolate(pred, size=[H, W])
dep_dx = abs(_gradient_x(up_pred))
dep_dy = abs(_gradient_y(up_pred))
loss1 = torch.sum(dep_dx * weights_x) / torch.numel(dep_dx)
loss1 += torch.sum(dep_dy * weights_y) / torch.numel(dep_dy)
loss += loss1
else:
# [N, 1, H, W]
dep_dx = abs(_gradient_x(preds))
dep_dy = abs(_gradient_y(preds))
loss = torch.sum(dep_dx * weights_x) / torch.numel(dep_dx)
loss += torch.sum(dep_dy * weights_y) / torch.numel(dep_dy)
return loss
def sparsity_loss(preds):
"""
preds: [N, 3/1, H, W]
"""
preds_abs = torch.abs(preds)
preds_spatial_abs_mean = torch.mean(preds_abs, dim=[2, 3], keepdim=True).detach()
sparse_map = 2 * preds_spatial_abs_mean * \
torch.sqrt(preds_abs / (preds_spatial_abs_mean+1e-6) + 1)
return torch.mean(sparse_map)
def group_smoothness(preds):
"""
preds: [N, 1/3, H, W]
"""
preds_dx = preds - torch.roll(preds, 1, 3)
preds_dy = preds - torch.roll(preds, 1, 2)
preds_dx = preds_dx[:, :, 1:, 1:]
preds_dy = preds_dy[:,:, 1:, 1:]
smoothness = torch.mean(torch.sqrt(1e-5 + torch.square(preds_dx) + torch.square(preds_dy)))
return smoothness
```
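The two regularizers at the end of this file are self-contained; here is a minimal, hypothetical sketch of how they could be combined on a predicted scene-flow map (the tensor shapes and loss weights are assumptions, not values from the original code).
```python
import torch
# Hypothetical combination of the regularizers defined above.
sf_pred = torch.randn(2, 3, 60, 112, requires_grad=True)  # dummy predicted scene flow
loss_sparse = sparsity_loss(sf_pred)       # pushes most of the flow toward zero (static scene)
loss_smooth = group_smoothness(sf_pred)    # encourages spatially smooth flow
loss = 0.1 * loss_sparse + 0.01 * loss_smooth  # illustrative weights
loss.backward()
```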
#### File: plugin/radar-v3/loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class PhotoMetricLoss(nn.Module):
def __init__(self, w_l1=0.15, w_census=0.85, reduce='min'):
super().__init__()
self.w_l1 = w_l1
self.w_census = w_census
self.reduce = reduce
def forward(self, img, pred_list):
loss = 0
if self.w_l1 > 0:
l1_loss_maps = [torch.mean(torch.abs(img_pred - img), dim=1, keepdim=True) for img_pred in pred_list]
l1_loss_map = torch.cat(l1_loss_maps, dim=1)
if self.reduce == 'min':
l1_loss = torch.mean(torch.min(l1_loss_map, dim=1)[0])
else:
l1_loss = torch.mean(l1_loss_map)
loss = loss + self.w_l1 * l1_loss
if self.w_census > 0:
census_loss_maps = [TernaryLoss(img, img_pred) for img_pred in pred_list]
census_loss_map = torch.cat(census_loss_maps, dim=1)
if self.reduce == 'min':
census_loss = torch.mean(torch.min(census_loss_map, dim=1)[0])
else:
census_loss = torch.mean(census_loss_map)
loss = loss + self.w_census * census_loss
return loss
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
```
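A minimal usage sketch of `PhotoMetricLoss` (not part of the original file): the target frame and the two warped candidates are random tensors standing in for real reconstructions, and with `reduce='min'` the loss takes the per-pixel minimum over the candidates.
```python
import torch
# Hypothetical usage of PhotoMetricLoss defined above.
criterion = PhotoMetricLoss(w_l1=0.15, w_census=0.85, reduce='min')
img = torch.rand(2, 3, 96, 160)                            # target frame
pred_list = [torch.rand(2, 3, 96, 160) for _ in range(2)]  # e.g. warps from prev/next frames
loss = criterion(img, pred_list)
print(loss.item())
```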
#### File: lidar_loss/geometry/pose_utils.py
```python
import torch
import numpy as np
########################################################################################################################
def euler2mat(angle):
"""Convert euler angles to rotation matrix"""
B = angle.size(0)
x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]
cosz = torch.cos(z)
sinz = torch.sin(z)
zeros = z.detach() * 0
ones = zeros.detach() + 1
zmat = torch.stack([cosz, -sinz, zeros,
sinz, cosz, zeros,
zeros, zeros, ones], dim=1).view(B, 3, 3)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zeros, siny,
zeros, ones, zeros,
-siny, zeros, cosy], dim=1).view(B, 3, 3)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([ones, zeros, zeros,
zeros, cosx, -sinx,
zeros, sinx, cosx], dim=1).view(B, 3, 3)
rot_mat = xmat.bmm(ymat).bmm(zmat)
return rot_mat
########################################################################################################################
def pose_vec2mat(vec, mode='euler'):
"""Convert Euler parameters to transformation matrix."""
if mode is None:
return vec
trans, rot = vec[:, :3].unsqueeze(-1), vec[:, 3:]
if mode == 'euler':
rot_mat = euler2mat(rot)
else:
raise ValueError('Rotation mode not supported {}'.format(mode))
mat = torch.cat([rot_mat, trans], dim=2) # [B,3,4]
return mat
########################################################################################################################
def invert_pose(T):
"""Inverts a [B,4,4] torch.tensor pose"""
Tinv = torch.eye(4, device=T.device, dtype=T.dtype).repeat([len(T), 1, 1])
Tinv[:, :3, :3] = torch.transpose(T[:, :3, :3], -2, -1)
Tinv[:, :3, -1] = torch.bmm(-1. * Tinv[:, :3, :3], T[:, :3, -1].unsqueeze(-1)).squeeze(-1)
return Tinv
########################################################################################################################
def invert_pose_numpy(T):
"""Inverts a [4,4] np.array pose"""
Tinv = np.copy(T)
R, t = Tinv[:3, :3], Tinv[:3, 3]
Tinv[:3, :3], Tinv[:3, 3] = R.T, - np.matmul(R.T, t)
return Tinv
########################################################################################################################
def quat2mat(qw, qx, qy, qz, x, y, z):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: first three coeff of quaternion of rotation. fourht is then computed to have a norm of 1 -- size = [B, 3]
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
w2, x2, y2, z2 = qw * qw, qx * qx, qy * qy, qz * qz
wx, wy, wz = qw * qx, qw * qy, qw * qz
xy, xz, yz = qx * qy, qx * qz, qy * qz
Mat = torch.tensor([[w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, x],
[2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, y],
[2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2, z],
[0, 0, 0, 1]], dtype=torch.float32).unsqueeze(0)
return Mat
########################################################################################################################
```
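A short sketch (illustrative values, not from the original file) showing how the pose helpers above fit together: a 6-DoF vector becomes a [B, 3, 4] matrix, which can be promoted to a homogeneous pose and inverted.
```python
import torch
# Hypothetical usage of pose_vec2mat and invert_pose defined above.
vec = torch.tensor([[0.1, 0.0, 0.2, 0.01, 0.02, 0.03]])  # [B, 6]: translation + euler angles
mat34 = pose_vec2mat(vec, mode='euler')                   # [B, 3, 4]
T = torch.eye(4).unsqueeze(0).repeat(len(mat34), 1, 1)    # promote to homogeneous [B, 4, 4]
T[:, :3, :] = mat34
T_inv = invert_pose(T)
print(torch.allclose(T @ T_inv, torch.eye(4).expand_as(T), atol=1e-5))  # True
```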
#### File: tools/data_converter/scannet_data_utils.py
```python
import mmcv
import numpy as np
from concurrent import futures as futures
from os import path as osp
class ScanNetData(object):
"""ScanNet data.
Generate scannet infos for scannet_converter.
Args:
root_path (str): Root path of the raw data.
split (str): Set split type of the data. Default: 'train'.
"""
def __init__(self, root_path, split='train'):
self.root_dir = root_path
self.split = split
self.split_dir = osp.join(root_path)
self.classes = [
'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain',
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin'
]
self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
self.label2cat = {self.cat2label[t]: t for t in self.cat2label}
self.cat_ids = np.array(
[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.cat_ids2class = {
nyu40id: i
for i, nyu40id in enumerate(list(self.cat_ids))
}
assert split in ['train', 'val', 'test']
split_file = osp.join(self.root_dir, 'meta_data',
f'scannetv2_{split}.txt')
mmcv.check_file_exist(split_file)
self.sample_id_list = mmcv.list_from_file(split_file)
def __len__(self):
return len(self.sample_id_list)
def get_box_label(self, idx):
box_file = osp.join(self.root_dir, 'scannet_train_instance_data',
f'{idx}_bbox.npy')
mmcv.check_file_exist(box_file)
return np.load(box_file)
def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
"""Get data infos.
This method gets information from the raw data.
Args:
num_workers (int): Number of threads to be used. Default: 4.
has_label (bool): Whether the data has label. Default: True.
sample_id_list (list[int]): Index list of the sample.
Default: None.
Returns:
infos (list[dict]): Information of the raw data.
"""
def process_single_scene(sample_idx):
print(f'{self.split} sample_idx: {sample_idx}')
info = dict()
pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
pts_filename = osp.join(self.root_dir,
'scannet_train_instance_data',
f'{sample_idx}_vert.npy')
pts_instance_mask_path = osp.join(self.root_dir,
'scannet_train_instance_data',
f'{sample_idx}_ins_label.npy')
pts_semantic_mask_path = osp.join(self.root_dir,
'scannet_train_instance_data',
f'{sample_idx}_sem_label.npy')
points = np.load(pts_filename)
            pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int64)
            pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int64)
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
points.tofile(
osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))
pts_instance_mask.tofile(
osp.join(self.root_dir, 'instance_mask', f'{sample_idx}.bin'))
pts_semantic_mask.tofile(
osp.join(self.root_dir, 'semantic_mask', f'{sample_idx}.bin'))
info['pts_path'] = osp.join('points', f'{sample_idx}.bin')
info['pts_instance_mask_path'] = osp.join('instance_mask',
f'{sample_idx}.bin')
info['pts_semantic_mask_path'] = osp.join('semantic_mask',
f'{sample_idx}.bin')
if has_label:
annotations = {}
boxes_with_classes = self.get_box_label(
sample_idx) # k, 6 + class
annotations['gt_num'] = boxes_with_classes.shape[0]
if annotations['gt_num'] != 0:
minmax_boxes3d = boxes_with_classes[:, :-1] # k, 6
classes = boxes_with_classes[:, -1] # k, 1
annotations['name'] = np.array([
self.label2cat[self.cat_ids2class[classes[i]]]
for i in range(annotations['gt_num'])
])
annotations['location'] = minmax_boxes3d[:, :3]
annotations['dimensions'] = minmax_boxes3d[:, 3:6]
annotations['gt_boxes_upright_depth'] = minmax_boxes3d
annotations['index'] = np.arange(
annotations['gt_num'], dtype=np.int32)
annotations['class'] = np.array([
self.cat_ids2class[classes[i]]
for i in range(annotations['gt_num'])
])
info['annos'] = annotations
return info
sample_id_list = sample_id_list if sample_id_list is not None \
else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
class ScanNetSegData(object):
"""ScanNet dataset used to generate infos for semantic segmentation task.
Args:
data_root (str): Root path of the raw data.
ann_file (str): The generated scannet infos.
split (str): Set split type of the data. Default: 'train'.
num_points (int): Number of points in each data input. Default: 8192.
label_weight_func (function): Function to compute the label weight.
Default: None.
"""
def __init__(self,
data_root,
ann_file,
split='train',
num_points=8192,
label_weight_func=None):
self.data_root = data_root
self.data_infos = mmcv.load(ann_file)
self.split = split
self.num_points = num_points
self.all_ids = np.arange(41) # all possible ids
self.cat_ids = np.array([
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,
39
]) # used for seg task
self.ignore_index = len(self.cat_ids)
        self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=int) * \
self.ignore_index
for i, cat_id in enumerate(self.cat_ids):
self.cat_id2class[cat_id] = i
# label weighting function is taken from
# https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \
label_weight_func is None else label_weight_func
def get_seg_infos(self):
scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
save_folder = osp.join(self.data_root, 'seg_info')
mmcv.mkdir_or_exist(save_folder)
np.save(
osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
scene_idxs)
np.save(
osp.join(save_folder, f'{self.split}_label_weight.npy'),
label_weight)
print(f'{self.split} resampled scene index and label weight saved')
def _convert_to_label(self, mask):
"""Convert class_id in loaded segmentation mask to label."""
if isinstance(mask, str):
if mask.endswith('npy'):
mask = np.load(mask)
else:
                mask = np.fromfile(mask, dtype=np.int64)
# first filter out unannotated points (labeled as 0)
mask = mask[mask != 0]
# then convert to [0, 20) labels
label = self.cat_id2class[mask]
return label
def get_scene_idxs_and_label_weight(self):
"""Compute scene_idxs for data sampling and label weight for loss \
calculation.
We sample more times for scenes with more points. Label_weight is
inversely proportional to number of class points.
"""
num_classes = len(self.cat_ids)
num_point_all = []
label_weight = np.zeros((num_classes + 1, )) # ignore_index
for data_info in self.data_infos:
label = self._convert_to_label(
osp.join(self.data_root, data_info['pts_semantic_mask_path']))
num_point_all.append(label.shape[0])
class_count, _ = np.histogram(label, range(num_classes + 2))
label_weight += class_count
# repeat scene_idx for num_scene_point // num_sample_point times
sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))
num_iter = int(np.sum(num_point_all) / float(self.num_points))
scene_idxs = []
for idx in range(len(self.data_infos)):
scene_idxs.extend([idx] * round(sample_prob[idx] * num_iter))
scene_idxs = np.array(scene_idxs).astype(np.int32)
# calculate label weight, adopted from PointNet++
label_weight = label_weight[:-1].astype(np.float32)
label_weight = label_weight / label_weight.sum()
label_weight = self.label_weight_func(label_weight).astype(np.float32)
return scene_idxs, label_weight
```
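A hedged driver sketch for the converter above; the root path and output pickle name are assumptions, and the call requires a preprocessed ScanNet directory (the meta_data split files and scannet_train_instance_data) to exist.
```python
import mmcv
# Hypothetical driver for ScanNetData defined above (paths are placeholders).
scannet = ScanNetData(root_path='./data/scannet', split='train')
infos = scannet.get_infos(num_workers=4, has_label=True)
mmcv.dump(infos, './data/scannet/scannet_infos_train.pkl')
```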
#### File: tools/surrdet/analyze_depth_data.py
```python
import os
import random
import numpy as np
CamNames = ['CAM_FRONT', 'CAM_FRONT_RIGHT',
'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT',
'CAM_FRONT_LEFT']
def random_sample(data_path, ):
file_names = os.listdir(data_path)
file_names = [f for f in file_names if f.endswith('.npy')]
sample_file_names = random.choices(file_names, k=1000)
ret = [os.path.join(data_path, f) for f in sample_file_names]
return ret
def analyze(data_path):
    random_sample_paths = random_sample(data_path)
    total_point_counts = 0
    counts_dict = {v: 0 for v in CamNames}
    div_dict = {v: 0 for v in CamNames}
    for path in random_sample_paths:
        pts = np.load(path)
        total_point_counts += pts.shape[0]
        count = pts.shape[0]
        for cam_name in CamNames:
            if path.find(cam_name) != -1:
                print(path, cam_name)
                counts_dict[cam_name] += count
                div_dict[cam_name] += 1
    print('avg point counts = {}'.format(total_point_counts / len(random_sample_paths)))
    for cam_name in CamNames:
        print('Number of avg points for {} is {} (from {} samples).'.format(cam_name, counts_dict[cam_name] / div_dict[cam_name], div_dict[cam_name]))
if __name__ == '__main__':
analyze('data/nuscenes/depth_maps/train/depth_data')
```
#### File: tools/surrdet/generate_sf_input.py
```python
import argparse
from matplotlib.pyplot import sca
import mmcv
import os
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.datasets import replace_ImageToTensor
from tools.surrdet.utils import disp2rgb
from PIL import Image
import numpy as np
from mmcv.parallel.scatter_gather import scatter
import json
from tqdm import tqdm
CamNames = ['CAM_FRONT', 'CAM_FRONT_RIGHT',
'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT',
'CAM_FRONT_LEFT']
def parse_output(output, save_dir):
"""
in model.eval_forward:
outputs = {'inv_depth_preds': now_inv_depth_preds,
'cam_intrinsic': now_cam_intrin,
'cam_pose': now_cam_pose,
'imgs': now_imgs}
"""
disp_preds = output['inv_depth_preds'][0] # [6*1, 1, H, W]
imgs = output['imgs'].permute(0, 2, 3, 1).detach().cpu().numpy() # [6*1, H, W, C]
disp_rgb = disp2rgb(disp_preds) # [6*1, H, W, C]
#from IPython import embed
#embed()
disp_rgb_list = np.split(disp_rgb, 6, axis=0)
disp_rgb = np.concatenate(disp_rgb_list, axis=2)[0]
img_list = np.split(imgs, 6, axis=0) # [1, H, W, 3]
imgs = np.concatenate(img_list, axis=2)[0]
visual_map = np.concatenate([disp_rgb, imgs], axis=0) * 255.0
visual_map = visual_map.astype(np.uint8)
visual_im = Image.fromarray(visual_map)
visual_im.save(os.path.join(save_dir, 'visualize.png'))
return disp_rgb_list, img_list, visual_im
def merge_output(merged_output_list, save_dir):
list_of_img_seq = [[], [], [], [], [], []]
for output_list in merged_output_list:
disp_rgb_list, img_list, visual_im = output_list
i = 0
for disp, img in zip(disp_rgb_list, img_list):
ret = np.concatenate([disp, img], axis=1)[0] * 255 # [1, 2H, W, C]
im = Image.fromarray(ret.astype(np.uint8))
list_of_img_seq[i].append(im)
i += 1
for img_seq, cam_name in zip(list_of_img_seq, CamNames):
img_seq[0].save(os.path.join(save_dir, '{}.gif'.format(cam_name)), format='GIF', append_images=img_seq[1:], save_all=True)
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out_dir', help='output parse files', default='/public/MARS/datasets/nuScenes-SF/SF-Input')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument('--seed', type=int, default=12345, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both specified, '
'--options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from plguin/xx, registry will be updated
    if hasattr(cfg, 'plugin') and cfg.plugin:
import importlib
if hasattr(cfg, 'plugin_dir'):
plugin_dir = cfg.plugin_dir
_module_dir = os.path.dirname(plugin_dir)
_module_dir = _module_dir.split('/')
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + '.' + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
else:
# import dir is the dirpath for the config file
_module_dir = os.path.dirname(args.config)
_module_dir = _module_dir.split('/')
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + '.' + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
# import modules from string list.
# if cfg.get('custom_imports', None):
# from mmcv.utils import import_modules_from_strings
# import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
#from IPython import embed
#embed()
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
#if args.checkpoint is not None:
# checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
model.eval()
meta_json = {}
print('len of data loader: ', len(data_loader))
for i, data in tqdm(enumerate(data_loader)):
with torch.no_grad():
data = scatter(data, [-1])[0]
for k, v in data.items():
if isinstance(v, torch.Tensor):
data[k] = v.cuda()
key_img_path = data['img_metas'][0]['filename']
key_img_name = os.path.join(*key_img_path.split('/')[2:])
key_img_filename = key_img_path.split('/')[-1]
save_path = os.path.join(args.out_dir, key_img_filename)
outputs = model.module.preprocess_forward(data)
outputs = outputs.detach().cpu().numpy()
np.save(save_path, outputs)
meta_json[key_img_name] = save_path + '.npy'
with open(os.path.join(args.out_dir, 'sf_inp_val_meta_{}.json'.format(args.local_rank)), 'w') as f:
json.dump(meta_json, f)
if __name__ == '__main__':
main()
```
#### File: tools/surrdet/get_extrinsics.py
```python
from tqdm import tqdm
import json
from nuscenes.nuscenes import NuScenes
from pyquaternion import Quaternion
import numpy as np
SPLITS = {'val': 'v1.0-trainval-val', 'train': 'v1.0-trainval', 'test': 'v1.0-test'}
CamNames = ['CAM_FRONT', 'CAM_FRONT_RIGHT',
'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT',
'CAM_FRONT_LEFT']
def quat_trans2matrix(quant, translation):
quant_matrix = Quaternion(quant).rotation_matrix
translation = np.array(translation)
# shape [3, 4]
matrix = np.concatenate([quant_matrix, translation[:, np.newaxis]], axis=-1)
last_line = np.array([0.0, 0.0, 0.0, 1.0])
# shape [4, 4]
matrix_full = np.concatenate([matrix, last_line[np.newaxis, ]], axis=0)
return matrix_full
def get_pose_intrinsic(save_path='/public/MARS/datasets/nuScenes-SF/meta/cam_pose_intrinsic.json'):
split = 'train'
data_path = 'data/nuscenes/'
nusc = NuScenes(
version=SPLITS[split], dataroot=data_path, verbose=True)
samples = nusc.sample
cam_token2cam_intext = {}
for sample in tqdm(samples):
for cam_name in CamNames:
cam_token = sample['data'][cam_name]
cam_data = nusc.get('sample_data', cam_token)
ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])
cam_cs = nusc.get('calibrated_sensor', cam_data['calibrated_sensor_token'])
# used to transform from ego to global
pose_matrix = quat_trans2matrix(ego_pose['rotation'], ego_pose['translation'])
            # used to transform from camera to ego
cam_pose = quat_trans2matrix(cam_cs['rotation'], cam_cs['translation'])
cam_pose_world = np.matmul(pose_matrix, cam_pose)
ret = {'pose': cam_pose_world.tolist()}
ret['intrinsic'] = cam_cs['camera_intrinsic']
cam_token2cam_intext[cam_token] = ret
with open(save_path, 'w') as f:
json.dump(cam_token2cam_intext, f)
def get_pose_intrinsic_v2(save_path='/public/MARS/datasets/nuScenes-SF/meta/cam_pose_intrinsic_v3.json'):
split = 'train'
data_path = 'data/nuscenes/'
nusc = NuScenes(
version=SPLITS[split], dataroot=data_path, verbose=True)
samples = nusc.sample
cam_token2cam_intext = {}
pointer_keys = ['prev', 'next']
for sample in tqdm(samples):
for cam_name in CamNames:
cam_token = sample['data'][cam_name]
cam_tokens = [cam_token]
cam_data = nusc.get('sample_data', cam_token)
for key in pointer_keys:
new_token = cam_data[key]
if new_token == '':
continue
cam_tokens.append(new_token)
for cam_token in cam_tokens:
cam_data = nusc.get('sample_data', cam_token)
ego_pose = nusc.get('ego_pose', cam_data['ego_pose_token'])
cam_cs = nusc.get('calibrated_sensor', cam_data['calibrated_sensor_token'])
# used to transform from ego to global
pose_matrix = quat_trans2matrix(ego_pose['rotation'], ego_pose['translation'])
                # used to transform from camera to ego
cam_pose = quat_trans2matrix(cam_cs['rotation'], cam_cs['translation'])
cam_pose_world = np.matmul(pose_matrix, cam_pose)
ret = {'pose': cam_pose_world.tolist()}
ret['intrinsic'] = cam_cs['camera_intrinsic']
ret['timestamp'] = cam_data['timestamp']
print(cam_data['timestamp'])
cam_token2cam_intext[cam_token] = ret
with open(save_path, 'w') as f:
json.dump(cam_token2cam_intext, f)
if __name__ == '__main__':
get_pose_intrinsic_v2()
``` |
{
"source": "a1647519517/HotspotMonitoringFrontend",
"score": 2
} |
#### File: crawler/toutiao/singlePass.py
```python
import numpy as np
import jieba
import jieba.analyse
from gensim import corpora, models, matutils
from textrank4zh import TextRank4Sentence  # keyword and key sentence extraction
import pymongo
import time
import datetime
import difflib
import re
class Single_Pass_Cluster(object):
def __init__(self,
origin_data,
stop_words_file,
generate_time):
self.origin_data = origin_data
self.stop_words_file = stop_words_file
self.result = []
self.generate_time = generate_time
def loadData(self):
        # read the documents into a list
texts = []
i = 0
for i in self.origin_data:
texts.append({
'text': i['content'].strip(),
'data': i
})
return texts
def word_segment(self, tocut_data):
        # segment the sentences with jieba and remove stop words
stopwords = [line.strip() for line in open(self.stop_words_file, encoding='utf-8').readlines()]
segmentation = []
for i in range(0, len(tocut_data)):
cut_word = []
tocut = tocut_data[i]
for j in tocut.pop('text'):
words = jieba.cut(j)
for word in words:
if word == ' ':
continue
if word not in stopwords:
cut_word.append(word)
tocut['word_segmentation'] = cut_word
segmentation.append(tocut)
return segmentation
def get_Tfidf_vector_representation(self, word_segmentation, pivot=10, slope=0.1):
        # get the vector-space representation of the documents
        word_segmentation = [i['word_segmentation'] for i in word_segmentation]
        dictionary = corpora.Dictionary(word_segmentation)  # map segmented words to word ids, forming a dictionary
        corpus = [dictionary.doc2bow(text) for text in word_segmentation]  # bag-of-words vector of each sentence
        tfidf = models.TfidfModel(corpus, pivot=pivot, slope=slope)  # further obtain the TF-IDF vector representation
corpus_tfidf = tfidf[corpus]
return corpus_tfidf
def getMaxSimilarity(self, dictTopic, vector):
        # compute the similarity between the incoming document and existing topics, using cosine similarity
maxValue = 0
maxIndex = -1
for k, cluster in dictTopic.items():
oneSimilarity = np.mean([matutils.cossim(vector, v) for v in cluster])
if oneSimilarity > maxValue:
maxValue = oneSimilarity
maxIndex = k
return maxIndex, maxValue
def single_pass(self, corpus, texts, theta):
dictTopic = {}
clusterTopic = {}
numTopic = 0
cnt = 0
for vector, text in zip(corpus, texts):
if numTopic == 0:
dictTopic[numTopic] = []
dictTopic[numTopic].append(vector)
clusterTopic[numTopic] = []
clusterTopic[numTopic].append(text)
numTopic += 1
else:
maxIndex, maxValue = self.getMaxSimilarity(dictTopic, vector)
                # the first document seeds a topic; assign a sentence to the existing, most similar topic
if maxValue > theta:
dictTopic[maxIndex].append(vector)
clusterTopic[maxIndex].append(text)
                # otherwise create a new topic
else:
dictTopic[numTopic] = []
dictTopic[numTopic].append(vector)
clusterTopic[numTopic] = []
clusterTopic[numTopic].append(text)
numTopic += 1
cnt += 1
if cnt % 1000 == 0:
print("processing {}...".format(cnt))
return dictTopic, clusterTopic
def fit_transform(self, theta):
        # produce the final clustering result: cluster ids, cluster sizes, key topic words and key sentences
datMat = self.loadData()
word_segmentation = []
        word_segmentation = self.word_segment(datMat)  # word segmentation done
print("............................................................................................")
        print('Text segmentation finished!')
        # get the vector-space representation of the text data
corpus_tfidf = self.get_Tfidf_vector_representation(word_segmentation)
dictTopic, clusterTopic = self.single_pass(corpus_tfidf, datMat, theta)
print("............................................................................................")
print("得到的主题数量有: {} 个 ...".format(len(dictTopic)))
print("............................................................................................\n")
        # sort clusters by the number of related sentences in descending order to find the important ones
clusterTopic_list = sorted(clusterTopic.items(), key=lambda x: len(x[1]), reverse=True)
print(clusterTopic_list)
for k in clusterTopic_list:
cluster_title = ''
total_forward_comment_like = 0
urls_to_print = ''
related_data = []
for item in k[1]:
cluster_title += item['data']['content']
# total_forward_comment_like += item['data']['hotspot_data']['forward_comment_like']
urls_to_print += '\n' + item['data']['hotspot_data']['url']
related_data.append(item['data']['hotspot_data'])
            # get the topic keywords of each cluster
jieba.analyse.set_stop_words(self.stop_words_file)
w_list = jieba.analyse.extract_tags(cluster_title, topK=15)
            # get the top-3 key topic sentences of each cluster
sentence = TextRank4Sentence(stop_words_file=self.stop_words_file)
sentence.analyze(text=cluster_title, lower=True)
s_list = sentence.get_key_sentences(num=3, sentence_min_len=3)
keywords = '/'.join([i for i in w_list])
keysentences = '\n'.join([i.sentence for i in s_list])
keysentences_to_save = [i.sentence for i in s_list]
            # print(
            #     "[doc count]: {} \n[keywords]: {} \n[key sentences]:\n{} \n[total forward/comment/like]:\n{} \n[urls]:\n{}".format(len(k[1]), keywords, keysentences, total_forward_comment_like, urls_to_print))
# print("-------------------------------------------------------------------------")
if len(k[1]) > 2:
self.result.append({
'related_content_count': len(k[1]),
'keywords': keywords,
'keysentences': keysentences_to_save,
'forward_comment_like': -1,
'new_forward_comment_like': -1,
'related_data': related_data,
'generate_time': self.generate_time
})
return self.result
def check_similarity(self, original, latest, delta):
"""
        :param original: previous hotspot topics [ hotspot ...]
        :param latest: new hotspot topics [ hotspot ...]
        :return: intersection: the merged result of the two [ hotspot ...]
"""
intersection = []
        old = original[:]  # copy the lists so the modifications below do not touch the originals; indices locate the original items when merging
new = latest[:]
for i_idx, i in enumerate(new):
for j_idx, j in enumerate(old):
                similarity = difflib.SequenceMatcher(None, i['keywords'], j['keywords']).quick_ratio()  # keyword-string similarity
if similarity >= delta:
print(i['keywords'] + '\n' + j['keywords'] + '\n')
print(similarity)
                    # similar enough (>= delta), so merge the two topics
                    intersection.append({
                        '_id': old[j_idx]['_id'],  # keep the old id
                        'related_content_count': old[j_idx]['related_content_count'] + new[i_idx]['related_content_count'],  # sum the related content counts
                        'keywords': new[i_idx]['keywords'],  # take the newer keywords
                        'keysentences': new[i_idx]['keysentences'],  # take the newer key sentences
                        'forward_comment_like': old[j_idx]['new_forward_comment_like'],  # carry over the old forward/comment/like count
                        'new_forward_comment_like': -1,  # filled in again after the next clustering run
                        'related_data': old[j_idx]['related_data'] + new[i_idx]['related_data'],  # union of the related data
                        'generate_time': old[j_idx]['generate_time']  # keep the original generation time
                    })
                    # remove the merged items by object (removing by index can hit the wrong element once
                    # earlier removals have shifted the lists), then stop matching this new topic
                    if j in original:
                        original.remove(j)
                    latest.remove(i)
                    break
intersection += original + latest
return intersection
if __name__ == '__main__':
database_ip = 'localhost'
database_port = 27017
database_name = 'toutiao'
client = pymongo.MongoClient(database_ip, database_port)
db = client[database_name]
crawler_col = db['red_news']
hotspot_col = db['hotspot']
hotspot2_col = db['hotspot_2']
end = datetime.datetime.now()
delta = datetime.timedelta(days=2)
# delta2 = datetime.timedelta(days=5)
start = end - delta
# end = start + delta
result = crawler_col.find({
'hotspot_data.time': {
'$gt': start.strftime('%Y-%m-%d %H:%M:%S'),
'$lt': end.strftime('%Y-%m-%d %H:%M:%S')
}
})
time_start = time.time()
a = Single_Pass_Cluster(origin_data=result, stop_words_file='stop_words.txt', generate_time=end.strftime('%Y-%m-%d %H:%M:%S'))
result = a.fit_transform(theta=0.25)
# 备份下旧数据,出错了可以用来分析
hotspot2_col.delete_many({})
hotspot2_col.insert_many(result)
time_end = time.time()
print('聚类用时:', time_end - time_start)
# 检查相似度,进行合并
previous_result = list(hotspot_col.find())
intersection = []
if len(previous_result) == 0:
intersection = result
else:
intersection = a.check_similarity(previous_result, result, 0.65)
hotspot_col.delete_many({})
hotspot_col.insert_many(intersection)
# 网易新闻评论页:url: https://3g.163.com/touch/comment.html?docid= id
# 在此页html中获取productKey,在数据库中获取ID,使用下面的页面获取所有评论及点赞
# 获取所有评论及点赞:https://comment.api.163.com/api/v1/products/ proructKey /threads/ ID ?ibc=newswap
```
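The trailing comments above describe how all comments and their like counts could be pulled from the NetEase (163.com) comment API. A minimal, hedged sketch of that idea follows: the `productKey` and article id placeholders are assumptions to be filled in as those comments describe, and the endpoint's behaviour is taken on trust from the comments rather than verified here.
```python
import requests

# Hypothetical values: productKey is scraped from the comment page HTML,
# article_id comes from the database, as the comments above describe.
product_key = "<productKey scraped from the page>"
article_id = "<article id from MongoDB>"

url = ("https://comment.api.163.com/api/v1/products/"
       "{key}/threads/{id}?ibc=newswap".format(key=product_key, id=article_id))
resp = requests.get(url, timeout=10)
print(resp.status_code)   # inspect the raw response before relying on any fields
print(resp.text[:200])
```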
#### File: crawler/wangyi/toutiao_update.py
```python
from lxml import etree
import time
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
import pymongo
import random
header_list = [
'user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"',
'user-agent="Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"',
'user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"'
]
# def getData():
# client = pymongo.MongoClient('mongodb://localhost:27017/')
# db = client['toutiao']
# col = db['hotspot']
# data = col.find({}, {"related_data": 1,"forward_comment_like":1,"new_forward_comment_like":1,"_id":0})
# print(data)
# return data
def main():
client = pymongo.MongoClient('mongodb://localhost:27017/')
db = client['crawler']
col = db['hotspot']
data = col.find({}, {"related_data": 1, "forward_comment_like": 1, "new_forward_comment_like": 1, "_id": 1})
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
option = ChromeOptions()
prefs = {"profile.managed_default_content_settings.images": 2, 'permissions.default.stylesheet': 2}
option.add_experimental_option("prefs", prefs)
# option.add_argument("--proxy-server=http://172.16.58.3")
option.add_experimental_option('excludeSwitches', ['enable-automation'])
header = random.choice(header_list)
option.add_argument(header)
option.add_argument('--headless')
    option.add_argument('--disable-gpu')  # headless browser without GPU rendering
driver = Chrome(options=option)
num = 0
comment = []
for i in data:
sum = 0
        print('Downloading cluster', num + 1, ':')
forward_comment_like = i["new_forward_comment_like"]
for j in range(len(i["related_data"])):
driver.get(i["related_data"][j]["url"])
tree = etree.HTML(driver.page_source)
comment_num = tree.xpath('//div[@id="comment"]/@comments_count')
            print("comment count:", comment_num)
sum = sum + int(comment_num[0])
num = num + 1
db['hotspot'].update_one(
{'_id': i['_id']}, {
'$set': {
'forward_comment_like': forward_comment_like,
'new_forward_comment_like': sum
}
})
        print('total:', sum)
# test = col.find({}, {"related_data": 1, "forward_comment_like": 1, "new_forward_comment_like": 1, "_id": 1})
# for a in test:
# print(a)
if __name__ == '__main__':
main()
```
#### File: crawler/zhihu/pyppeteerLearn.py
```python
import asyncio
from pyppeteer import launch
import aiohttp
import pymongo
import jieba
import jieba.analyse as analyse
import collections
import time
from textrank4zh import TextRank4Keyword, TextRank4Sentence
import re
search_url = 'https://www.zhihu.com/search?range=1w&type=content&q='
get_answers_by_id_url = 'https://www.zhihu.com/api/v4/questions/{id}/answers?include=data[*].comment_count,content,editable_content,voteup_count&limit=20&offset={offset}&platform=desktop&sort_by=updated'
cookies = {
'z_c0': '2|1:0|10:1586419379|4:z_c0|80:MS4xR2NUakR3QUFBQUFtQUFBQVlBSlZUYk1rZkY4Q1NFMVFKX2hmV0xvRXZpNmdxLUVPcnZ1Z2ZnPT0=|3d209a9dbad2de0a571ddf85e0420385e06ae07072d1d4cc01a935f6112ecf1d',
# 'domain': '.zhihu.com',
# 'path': '/'
}
mongo_client = pymongo.MongoClient('mongodb://localhost:27017/')
db = mongo_client['zhihu']
col = db['zhihu']
async def intercept_request(req):
if req.resourceType in ["image", "media", "websocket", "stylesheet", "font"]:
await req.abort()
else:
await req.continue_()
async def get_answers(url, client):
print(url)
await asyncio.sleep(2)
resp = await client.get(url)
a = await resp.json()
if a['paging']['is_end'] is False:
return a['data']
else:
return 0
def html_wash(txt):
# string = re.findall('[\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b\u4e00-\u9fa5]',txt)
return re.sub(r'</?\w+[^>]*>', '', txt)
async def search():
browser = await launch({
'headless': False,
'executablePath': 'C:\\Users\\16475\\AppData\\Local\\pyppeteer\\pyppeteer\\local_chromium\\575458\\chrome-win32\\chrome.exe'
})
page = await browser.newPage()
await page.setViewport(viewport={'width': 1280, 'height': 800})
await page.setJavaScriptEnabled(enabled=True)
await page.setUserAgent(
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36'
)
await page.setCookie({
"name": 'z_c0',
'value': '2|1:0|10:1586419379|4:z_c0|80:MS4xR2NUakR3QUFBQUFtQUFBQVlBSlZUYk1rZkY4Q1NFMVFKX2hmV0xvRXZpNmdxLUVPcnZ1Z2ZnPT0=|3d209a9dbad2de0a571ddf85e0420385e06ae07072d1d4cc01a935f6112ecf1d',
'domain': '.zhihu.com',
'path': '/'
})
await page.setRequestInterception(True)
page.on('request', intercept_request)
question_id_list = []
question_content = []
n = 0
while n < 1:
await page.goto(search_url + '世界卫生组织')
time.sleep(0.5)
elements = await page.querySelectorAll('.ContentItem-title a')
for item in elements:
link = await(await item.getProperty('href')).jsonValue()
print(link)
t_url = link.split('/')
t_question_id = t_url[4]
if len(t_url) > 5:
                if t_url[4] in question_id_list:  # skip questions already crawled; redundant, since every refresh resurfaces old links, and could be improved
                    print('------------------------ already crawled question, skipping ------------------------')
continue
else:
                    question_id_list.append(t_question_id)  # record the question id so it is not crawled again
else:
continue
# await page.evaluate('window.scrollBy(0, document.body.scrollHeight)')
await page.reload()
n += 1
time.sleep(0.5)
print(question_id_list)
await browser.close()
async with aiohttp.ClientSession(cookies=cookies) as client:
url_list = (get_answers_by_id_url.format(id=question_id, offset=offset) for question_id in question_id_list for offset in range(0, 81, 20))
tasks_list = (asyncio.create_task(get_answers(url, client)) for url in url_list)
result = await asyncio.gather(*tasks_list)
# tags_list = []
contents = ''
tr4s = TextRank4Sentence()
tr4w = TextRank4Keyword()
# print(result)
q_id = ''
text = ''
for i in result:
if i == 0:
continue
elif q_id == '':
q_id = i[0]['question']['id']
elif i[0]['question']['id'] != q_id:
q_id = i[0]['question']['id']
# tr4w.analyze(text=text, lower=True, window=2)
# for phrase in tr4w.get_keyphrases(keywords_num=10, min_occur_num=2):
# print(phrase)
tr4s.analyze(text=text, lower=True, source='no_stop_words')
for item in tr4s.get_key_sentences(num=5):
print(item.weight, item.sentence)
text = ''
else:
for j in i:
content = html_wash(j['content'])
text += content
print('id:{id}, question_id:{question_id}, title:{title}, content:{content}'.format(id=j['id'], question_id=j['question']['id'], title=j['question']['title'], content=content))
                # print(item.index, item.weight, item.sentence)  # index is the sentence position in the text, weight is its score
# tags = jieba.analyse.extract_tags(j['content'], topK=10, withWeight=True, allowPOS=('n', 'nr','ns', 'nt', 'v'))
# tags_list.append(dict(tags))
# col.insert_many(tags_list)
# words_count = collections.Counter(keywords)
# print(words_count)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(search())
``` |
{
"source": "a1768794903/StrawberryRobot",
"score": 3
} |
#### File: a1768794903/StrawberryRobot/pick-strawberry.py
```python
import time
import cv2 as cv
import numpy as np
# lib.Arm7Bot is a Python class for PineCone.ai robotic arm
# You should change the following line of code to your own robotic arm driver
from lib.Arm7Bot import Arm7Bot
region_rows = 64
region_cols = 64
def findStraberry( bgr_image ):
rows, cols, _ = bgr_image.shape
#crop the center region of the image
    bgr_region = bgr_image[int(rows/2)-region_rows:int(rows/2)+region_rows,
                           int(cols/2)-region_cols:int(cols/2)+region_cols]
img_hsv=cv.cvtColor(bgr_region, cv.COLOR_BGR2HSV)
# lower mask (0-10)
lower_red = np.array([0,50,50])
upper_red = np.array([10,255,255])
mask0 = cv.inRange(img_hsv, lower_red, upper_red)
# upper mask (170-180)
lower_red = np.array([170,50,50])
upper_red = np.array([180,255,255])
mask1 = cv.inRange(img_hsv, lower_red, upper_red)
# join my masks
maskRed = mask0+mask1
    # green mask (20-40)
lower_green = np.array([20,50,50])
upper_green = np.array([40,255,255])
maskGreen = cv.inRange(img_hsv, lower_green, upper_green)
red_ratio = cv.sumElems(maskRed)
green_ratio = cv.sumElems(maskGreen)
red_ratio = red_ratio[0]/255/region_rows/region_cols/4
green_ratio = green_ratio[0]/255/region_rows/region_cols/4
    # draw a rectangle on the image as a guide
cv.rectangle(bgr_image,
(int(cols/2)-region_cols, int(rows/2)-region_rows),
(int(cols/2)+region_cols, int(rows/2)+region_rows), (255, 0, 0), 3)
# Display the frame
    cv.imshow('Camera', bgr_image)
cv.imshow('maskRed', maskRed)
cv.imshow('maskGreen', maskGreen)
if(red_ratio > 0.6):
return 'red'
elif( green_ratio > 0.6):
return 'green'
else:
return 'no strawberry'
# assign serial port to 7Bot.
# ATTENTION: Change the parameter "/dev/cu.SLAB_USBtoUART" below
# according to your own computer OS system. Open whatever port is 7Bot on your computer.
# Usually: "/dev/cu.SLAB_USBtoUART" on Mac OS
# "/dev/ttyUSB0" on Linux
# 'COM1' on Windows
arm = Arm7Bot("/dev/ttyUSB0") #please adjust according to your own robotic arm
# capture frames from a camera with device index=0 by OpenCV
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_BUFFERSIZE, 1); #don't buffer too many images
#the main loop
while True:
# reads frame from a camera
ret, frame = cap.read() #the buffered one, read and throw it
ret, frame = cap.read() #this one
object_color = findStraberry(frame)
print(object_color)
# Wait for 1ms, press q to exit
if cv.waitKey(1) & 0xFF == ord('q'):
break
if(object_color == 'no strawberry'):
continue
# arm.setIK6() is a function to control PineCone.ai robotic arm,
# you should change to functions of your own robotic arm
    arm.setIK6([0, 200, 150], [0, 0, -1]) #move to [x=0, y=200, z=150]
time.sleep(1)
arm.setIK6([0, 200, 20], [0, 0, -1]) #down
time.sleep(1)
arm.setAngle(6,90) #open hand
time.sleep(1)
arm.setAngle(6,30) #close hand
time.sleep(1)
arm.setIK6([0, 200, 150], [0, 0, -1]) #up
time.sleep(1)
if(object_color == 'red'):
        arm.setIK6([-200, 1, 150], [0, 0, -1]) #move to red basket
time.sleep(1)
arm.setIK6([-200, 1, 80], [0, 0, -1]) #down
time.sleep(1)
elif(object_color == 'green'):
        arm.setIK6([-140, 140, 150], [0, 0, -1]) #move to green basket
time.sleep(1)
arm.setIK6([-140, 140, 80], [0, 0, -1]) #down
time.sleep(1)
arm.setAngle(6,90) #open hand
time.sleep(1)
    arm.setIK6([0, 200, 150], [0, 0, -1]) #move back to [x=0, y=200, z=150]
time.sleep(1)
``` |
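The two-range red mask in `findStraberry` above (hue 0-10 plus 170-180) is the usual way to handle red wrapping around OpenCV's hue circle. Below is a minimal standalone sketch of the same ratio computation on a single image; the image path is a made-up example and the thresholds are copied from the function above.
```python
import cv2 as cv
import numpy as np

img = cv.imread("strawberry.jpg")  # hypothetical image path
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)

# red wraps around hue=0, so combine both ends of the hue range
mask_low = cv.inRange(hsv, np.array([0, 50, 50]), np.array([10, 255, 255]))
mask_high = cv.inRange(hsv, np.array([170, 50, 50]), np.array([180, 255, 255]))
mask_red = mask_low + mask_high

red_ratio = cv.countNonZero(mask_red) / mask_red.size
print("red pixel ratio:", red_ratio)
```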
{
"source": "a17hq/py_trees",
"score": 2
} |
#### File: py_trees/py_trees/idioms.py
```python
import operator
import typing
import uuid
from . import behaviour
from . import behaviours
from . import blackboard
from . import common
from . import composites
from . import decorators
##############################################################################
# Creational Methods
##############################################################################
def pick_up_where_you_left_off(
name="Pickup Where You Left Off Idiom",
tasks=[]):
"""
Rudely interrupted while enjoying a sandwich, a caveman (just because
they wore loincloths does not mean they were not civilised), picks
up his club and fends off the sabre-tooth tiger invading his sanctum
as if he were swatting away a gnat. Task accomplished, he returns
to the joys of munching through the layers of his sandwich.
.. graphviz:: dot/pick_up_where_you_left_off.dot
.. note::
There are alternative ways to accomplish this idiom with their
pros and cons.
a) The tasks in the sequence could be replaced by a
factory behaviour that dynamically checks the state of play and
spins up the tasks required each time the task sequence is first
entered and invalidates/deletes them when it is either finished
or invalidated. That has the advantage of not requiring much of
the blackboard machinery here, but disadvantage in not making
visible the task sequence itself at all times (i.e. burying
details under the hood).
b) A new composite which retains the index between
initialisations can also achieve the same pattern with fewer
blackboard shenanigans, but suffers from an increased
logical complexity cost for your trees (each new composite
increases decision making complexity (O(n!)).
Args:
name (:obj:`str`): the name to use for the task sequence behaviour
        tasks ([:class:`~py_trees.behaviour.Behaviour`]): list of tasks to be sequentially performed
Returns:
:class:`~py_trees.behaviour.Behaviour`: root of the generated subtree
"""
root = composites.Sequence(name=name)
for task in tasks:
task_selector = composites.Selector(name="Do or Don't")
task_guard = behaviours.CheckBlackboardVariableValue(
name="Done?",
check=common.ComparisonExpression(
variable=task.name.lower().replace(" ", "_") + "_done",
value=True,
operator=operator.eq
)
)
sequence = composites.Sequence(name="Worker")
mark_task_done = behaviours.SetBlackboardVariable(
name="Mark\n" + task.name.lower().replace(" ", "_") + "_done",
variable_name=task.name.lower().replace(" ", "_") + "_done",
variable_value=True
)
sequence.add_children([task, mark_task_done])
task_selector.add_children([task_guard, sequence])
root.add_child(task_selector)
for task in tasks:
clear_mark_done = behaviours.UnsetBlackboardVariable(
name="Clear\n" + task.name.lower().replace(" ", "_") + "_done",
key=task.name.lower().replace(" ", "_") + "_done"
)
root.add_child(clear_mark_done)
return root
def eternal_guard(
subtree: behaviour.Behaviour,
name: str="Eternal Guard",
conditions: typing.List[behaviour.Behaviour]=[],
blackboard_namespace: str=None) -> behaviour.Behaviour:
"""
The eternal guard idiom implements a stronger :term:`guard` than the typical check at the
beginning of a sequence of tasks. Here they guard continuously while the task sequence
is being executed. While executing, if any of the guards should update with
status :data:`~common.Status.FAILURE`, then the task sequence is terminated.
.. graphviz:: dot/idiom-eternal-guard.dot
:align: center
Args:
subtree: behaviour(s) that actually do the work
name: the name to use on the root behaviour of the idiom subtree
conditions: behaviours on which tasks are conditional
blackboard_namespace: applied to condition variable results stored on the blackboard (default: derived from the idiom name)
Returns:
the root of the idiom subtree
.. seealso:: :class:`py_trees.decorators.EternalGuard`
"""
if blackboard_namespace is None:
blackboard_namespace = name.lower().replace(" ", "_")
blackboard_variable_names = []
# construct simple, easy to read, variable names (risk of conflict)
counter = 1
for condition in conditions:
suffix = "" if len(conditions) == 1 else "_{}".format(counter)
blackboard_variable_names.append(
blackboard.Blackboard.separator +
blackboard_namespace +
"_condition" +
suffix
)
counter += 1
    # if any of these simple names are already on the blackboard, switch to unique names
conflict = False
for name in blackboard_variable_names:
try:
unused_name = blackboard.Blackboard.get(name)
conflict = True
except KeyError:
pass
if conflict:
blackboard_variable_names = []
counter = 1
unique_id = uuid.uuid4()
for condition in conditions:
suffix = "" if len(conditions) == 1 else "_{}".format(counter)
blackboard_variable_names.append(blackboard_namespace + "_" + str(unique_id) + "_condition" + suffix)
counter += 1
# build the tree
root = composites.Parallel(
name=name,
policy=common.ParallelPolicy.SuccessOnAll(synchronise=False)
)
guarded_tasks = composites.Selector(name="Guarded Tasks")
for condition, blackboard_variable_name in zip(conditions, blackboard_variable_names):
decorated_condition = decorators.StatusToBlackboard(
name="StatusToBB",
child=condition,
variable_name=blackboard_variable_name
)
root.add_child(decorated_condition)
guarded_tasks.add_child(
behaviours.CheckBlackboardVariableValue(
name="Abort on\n{}".format(condition.name),
check=common.ComparisonExpression(
variable=blackboard_variable_name,
value=common.Status.FAILURE,
operator=operator.eq
)
)
)
guarded_tasks.add_child(subtree)
root.add_child(guarded_tasks)
return root
def either_or(
conditions: typing.List[common.ComparisonExpression],
subtrees: typing.List[behaviour.Behaviour],
name="Either Or",
namespace: typing.Optional[str]=None
) -> behaviour.Behaviour:
"""
Often you need a kind of selector that doesn't implement prioritisations, i.e.
you would like different paths to be selected on a first-come, first-served basis.
.. code-block:: python
task_one = py_trees.behaviours.TickCounter(name="Subtree 1", duration=2)
task_two = py_trees.behaviours.TickCounter(name="Subtree 2", duration=2)
either_or = py_trees.idioms.either_or(
name="EitherOr",
conditions=[
py_trees.common.ComparisonExpression("joystick_one", "enabled", operator.eq),
py_trees.common.ComparisonExpression("joystick_two", "enabled", operator.eq),
],
subtrees=[task_one, task_two],
namespace="either_or",
)
.. graphviz:: dot/idiom-either-or.dot
:align: center
:caption: Idiom - Either Or
Up front is an XOR conditional check which locks in the result on the blackboard
under the specified namespace. Locking the result in permits the conditional
variables to vary in future ticks without interrupting the execution of the
chosen subtree (an example of a conditional variable may be one that has
registered joystick button presses).
Once the result is locked in, the relevant subtree is activated beneath the
selector. The children of the selector are, from left to right, not in any
order of priority since the previous xor choice has been locked in and isn't
revisited until the subtree executes to completion. Only one
may be active and it cannot be interrupted by the others.
The only means of interrupting the execution is via a higher priority in the
tree that this idiom is embedded in.
Args:
conditions: list of triggers that ultimately select the subtree to enable
subtrees: list of subtrees to tick from in the either_or operation
        name: the name to use for this idiom's root behaviour
        namespace: this idiom's private variables will be put behind this namespace
Raises:
ValueError if the number of conditions does not match the number of subtrees
If no namespace is provided, a unique one is derived from the idiom's name.
.. seealso:: :ref:`py-trees-demo-either-or <py-trees-demo-either-or-program>`
.. todo:: a version for which other subtrees can preempt (in an unprioritised manner) the active branch
"""
if len(conditions) != len(subtrees):
raise ValueError("Must be the same number of conditions as subtrees [{} != {}]".format(
len(conditions), len(subtrees))
)
root = composites.Sequence(name=name)
configured_namespace: str = namespace if namespace is not None else \
blackboard.Blackboard.separator + name.lower().replace("-", "_").replace(" ", "_") + \
blackboard.Blackboard.separator + str(root.id).replace("-", "_").replace(" ", "_") + \
blackboard.Blackboard.separator + "conditions"
xor = behaviours.CheckBlackboardVariableValues(
name="XOR",
checks=conditions,
operator=operator.xor,
namespace=configured_namespace
)
chooser = composites.Selector(name="Chooser")
for counter in range(1, len(conditions) + 1):
sequence = composites.Sequence(name="Option {}".format(str(counter)))
variable_name = configured_namespace + blackboard.Blackboard.separator + str(counter)
disabled = behaviours.CheckBlackboardVariableValue(
name="Enabled?",
check=common.ComparisonExpression(
variable=variable_name,
value=True,
operator=operator.eq
)
)
sequence.add_children([disabled, subtrees[counter - 1]])
chooser.add_child(sequence)
root.add_children([xor, chooser])
return root
def oneshot(
behaviour: behaviour.Behaviour,
name: str="Oneshot",
variable_name: str="oneshot",
policy: common.OneShotPolicy=common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION
) -> behaviour.Behaviour:
"""
Ensure that a particular pattern is executed through to
completion just once. Thereafter it will just rebound with the completion status.
.. graphviz:: dot/oneshot.dot
.. note::
Set the policy to configure the oneshot to keep trying if failing, or to abort
further attempts regardless of whether it finished with status
:data:`~py_trees.common.Status.SUCCESS`||:data:`~py_trees.common.Status.FAILURE`.
Args:
behaviour: single behaviour or composited subtree to oneshot
name: the name to use for the oneshot root (selector)
variable_name: name for the variable used on the blackboard, may be nested
policy: execute just once regardless of success or failure, or keep trying if failing
Returns:
:class:`~py_trees.behaviour.Behaviour`: the root of the oneshot subtree
.. seealso:: :class:`py_trees.decorators.OneShot`
"""
subtree_root = composites.Selector(name=name)
oneshot_with_guard = composites.Sequence(
name="Oneshot w/ Guard")
check_not_done = decorators.Inverter(
name="Not Completed?",
child=behaviours.CheckBlackboardVariableExists(
name="Completed?",
variable_name=variable_name
)
)
set_flag_on_success = behaviours.SetBlackboardVariable(
name="Mark Done\n[SUCCESS]",
variable_name=variable_name,
variable_value=common.Status.SUCCESS
)
# If it's a sequence, don't double-nest it in a redundant manner
if isinstance(behaviour, composites.Sequence):
behaviour.add_child(set_flag_on_success)
sequence = behaviour
else:
sequence = composites.Sequence(name="OneShot")
sequence.add_children([behaviour, set_flag_on_success])
oneshot_with_guard.add_child(check_not_done)
if policy == common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION:
oneshot_with_guard.add_child(sequence)
else: # ON_COMPLETION (SUCCESS || FAILURE)
oneshot_handler = composites.Selector(name="Oneshot Handler")
bookkeeping = composites.Sequence(name="Bookkeeping")
set_flag_on_failure = behaviours.SetBlackboardVariable(
name="Mark Done\n[FAILURE]",
variable_name=variable_name,
variable_value=common.Status.FAILURE
)
bookkeeping.add_children(
[set_flag_on_failure,
behaviours.Failure(name="Failure")
])
oneshot_handler.add_children([sequence, bookkeeping])
oneshot_with_guard.add_child(oneshot_handler)
oneshot_result = behaviours.CheckBlackboardVariableValue(
name="Oneshot Result",
check=common.ComparisonExpression(
variable=variable_name,
value=common.Status.SUCCESS,
operator=operator.eq
)
)
subtree_root.add_children([oneshot_with_guard, oneshot_result])
return subtree_root
``` |
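As a quick illustration of the idioms above, here is a minimal usage sketch (not taken from the py_trees documentation) that wraps a trivially succeeding behaviour in the `oneshot` idiom and ticks it a few times. It assumes a py_trees 2.x install; the behaviour name and blackboard variable name are arbitrary choices for the example.
```python
import py_trees

task = py_trees.behaviours.Success(name="Do Once")  # stands in for real work
root = py_trees.idioms.oneshot(
    behaviour=task,
    name="Oneshot",
    variable_name="oneshot_done",
    policy=py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION,
)
tree = py_trees.trees.BehaviourTree(root)
for i in range(3):
    tree.tick()
    print(i, root.status)  # SUCCESS on every tick once the work has completed
```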
{
"source": "a17juanbl/exercicios",
"score": 2
} |
#### File: website_info/controllers/main.py
```python
from odoo import http
from odoo.addons.website.controllers.main import Website
class WebsiteInfo(Website):
@http.route()
def website_info(self):
result = super(WebsiteInfo, self).website_info()
result.qcontext['apps'] = result.qcontext['apps'].filtered(
lambda x: x.name != 'website'
)
return result
```
#### File: my_library/models/models.py
```python
from collections import defaultdict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models
class Base(models.AbstractModel):
_inherit = 'base'
@api.model
def get_m2m_group_data(self, domain, m2m_field):
records = self.search(domain)
result_dict = {}
for record in records:
for m2m_record in record[m2m_field]:
if m2m_record.id not in result_dict:
result_dict[m2m_record.id] = {
'name': m2m_record.display_name,
'children': [],
'model': m2m_record._name
}
result_dict[m2m_record.id]['children'].append({
'name': record.display_name,
'id': record.id,
})
return result_dict
```
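A hedged illustration of calling the grouping helper above from anywhere an Odoo environment (`env`) is available, for example an Odoo shell; the model and many2many field names below are examples only and may not exist in this module.
```python
# Hypothetical call: group available books by a many2many field such as author_ids.
grouped = env['library.book'].get_m2m_group_data(
    domain=[('state', '=', 'available')],
    m2m_field='author_ids',
)
for group_id, group in grouped.items():
    print(group['name'], [child['name'] for child in group['children']])
```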
#### File: my_library/models/library_book_rent.py
```python
import re
from odoo.tools import email_split, email_escape_char
from odoo import models, fields, api
class LibraryBookRent(models.Model):
_name = 'library.book.rent'
_inherit = ['mail.thread', 'mail.activity.mixin']
book_id = fields.Many2one('library.book', 'Book', required=True)
borrower_id = fields.Many2one('res.partner', 'Borrower', required=True)
state = fields.Selection([('ongoing', 'Ongoing'), ('returned', 'Returned')],
'State', default='ongoing', required=True,
track_visibility='always')
rent_date = fields.Date(default=fields.Date.today, track_visibility='onchange')
return_date = fields.Date(track_visibility='onchange')
@api.model
def create(self, vals):
book_rec = self.env['library.book'].browse(vals['book_id']) # returns record set from for given id
book_rec.make_borrowed()
res = super(LibraryBookRent, self).create(vals)
res.message_subscribe(partner_ids=[res.borrower_id.id])
if res.return_date:
res.activity_schedule('mail.mail_activity_data_call', date_deadline=res.return_date)
return res
def book_return(self):
self.ensure_one()
self.book_id.make_available()
self.write({
'state': 'returned',
'return_date': fields.Date.today()
})
def book_return_reminder(self):
template_id = self.env.ref('my_library.book_return_reminder')
self.message_post_with_template(template_id.id)
def book_return_reminder_qweb(self):
self.message_post_with_view('my_library.book_return_reminder_qweb')
@api.model
def message_new(self, msg_dict, custom_values=None):
self = self.with_context(default_user_id=False)
if custom_values is None:
custom_values = {}
        regex = re.compile(r"^\[(.*)\]")
match = regex.match(msg_dict.get('subject')).group(1)
book_id = self.env['library.book'].search([('name', '=', match), ('state', '=', 'available')], limit=1)
custom_values['book_id'] = book_id.id
email_from = email_escape_char(email_split(msg_dict.get('from'))[0])
custom_values['borrower_id'] = self._search_on_partner(email_from)
return super(LibraryBookRent, self).message_new(msg_dict, custom_values)
``` |
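The `message_new` override above derives the book from the incoming mail subject, which is expected to start with the book name in square brackets. A tiny sketch of just that parsing step, with a made-up subject line:
```python
import re

subject = "[Odoo Development Cookbook] borrow request"  # hypothetical subject
match = re.compile(r"^\[(.*)\]").match(subject)         # same pattern as message_new
book_name = match.group(1) if match else None
print(book_name)  # -> Odoo Development Cookbook
```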
{
"source": "a17r/ParaView",
"score": 3
} |
#### File: paraview/web/vtkjs_helper.py
```python
from paraview import simple
import os, json
# -----------------------------------------------------------------------------
def getAllNames():
actorNameMapping = {}
srcs = simple.GetSources()
duplicates = {}
for key, val in srcs.items():
# Prevent name duplication
nameToUse = key[0]
if nameToUse in duplicates:
count = 1
newName = '%s (%d)' % (nameToUse, count)
while newName in duplicates:
count += 1
newName = '%s (%d)' % (nameToUse, count)
nameToUse = newName
duplicates[nameToUse] = True
representation = simple.GetRepresentation(val)
if representation:
vtkRepInstance = representation.GetClientSideObject()
if 'GetActiveRepresentation' in dir(vtkRepInstance):
actorRep = vtkRepInstance.GetActiveRepresentation().GetActor()
actorNameMapping[nameToUse] = actorRep
return actorNameMapping
# -----------------------------------------------------------------------------
def findName(names, actor, defaultName):
for name in names:
if actor == names[name]:
return name
return defaultName
# -----------------------------------------------------------------------------
def getRenameMap():
renameMap = {}
names = getAllNames()
view = simple.GetActiveView()
renderer = view.GetClientSideObject().GetRenderer()
viewProps = renderer.GetViewProps()
idx = 1
for viewProp in viewProps:
if not viewProp.GetVisibility():
continue
if not viewProp.IsA('vtkActor'):
continue
bounds = viewProp.GetBounds()
if bounds[0] > bounds[1]:
continue
            # The mapping will fail for multiblock datasets composed of several blocks.
            # Merge Blocks should be used to work around the renaming issue for now,
            # as the id is based on a valid block vs representation.
strIdx = '%s' % idx
renameMap[strIdx] = findName(names, viewProp, strIdx)
idx += 1
return renameMap
# -----------------------------------------------------------------------------
def applyParaViewNaming(directoryPath):
renameMap = getRenameMap()
scene = None
filePath = os.path.join(directoryPath, 'index.json')
with open(filePath) as file:
scene = json.load(file)
for item in scene['scene']:
if item['name'] in renameMap:
item['name'] = renameMap[item['name']]
with open(filePath, 'w') as file:
file.write(json.dumps(scene, indent=2))
```
#### File: paraview/demos/filedriver_miniapp.py
```python
r"""
This script is a miniapp that acts a Catalyst-instrumented simulation code.
Instead of doing some computation, however, this script reads the files
specified through command line arguments and provides the data read in as the
simulation data.
Example usage:
mpirun -np 8 ./bin/pvbatch -sym -m paraview.demos.filedriver_miniapp \
-g "/tmp/extracts/Wavelet1_*.pvti" -s /tmp/foo.py
"""
import argparse, time, os.path, glob
parser = argparse.ArgumentParser(\
description="File-based MiniApp for Catalyst testing")
parser.add_argument("-s", "--script", type=str, action="append",
help="path(s) to the Catalyst script(s) to use for in situ processing. Can be a "
".py file or a Python package zip or directory",
required=True)
parser.add_argument("--script-version", type=int,
help="choose Catalyst analysis script version explicitly, otherwise it "
"will be determined automatically. When specifying multiple scripts, this "
"setting applies to all scripts.", default=0)
parser.add_argument("-d", "--delay", type=float,
help="delay (in seconds) between timesteps (default: 0.0)", default=0.0)
parser.add_argument("-c", "--channel", type=str,
help="Catalyst channel name (default: input)", default="input")
parser.add_argument("-g", "--glob", type=str,
help="Pattern to use to locate input filenames.", required=True)
def create_reader(files):
from paraview import simple
reader = simple.OpenDataFile(files)
if not reader:
raise RuntimeError("Failed to create a suitable reader for files: %s", str(files))
return reader
def read_dataset(reader, time, rank, num_ranks):
reader.UpdatePipeline(time)
vtk_reader = reader.GetClientSideObject()
ds = vtk_reader.GetOutputDataObject(0)
if ds.GetExtentType() == 1: # VTK_3D_EXTENT
key = vtk_reader.GetExecutive().WHOLE_EXTENT()
whole_extent = vtk_reader.GetOutputInformation(0).Get(key)[:]
return (ds, whole_extent)
else:
return (ds, None)
def main(args):
"""The main loop"""
    # this globbing logic is copied from `filedriver.py`. It may be worth
# cleaning this up to ensure it handles typical use-cases we encounter.
files = glob.glob(args.glob)
# In case the filenames aren't padded we sort first by shorter length and then
# alphabetically. This is a slight modification based on the question by Adrian and answer by
# <NAME> at:
# https://stackoverflow.com/questions/4659524/how-to-sort-by-length-of-string-followed-by-alphabetical-order
files.sort(key=lambda item: (len(item), item))
# initialize Catalyst
from paraview.catalyst import bridge
from paraview import print_info, print_warning
bridge.initialize()
# add analysis script
for script in args.script:
bridge.add_pipeline(script, args.script_version)
# Some MPI related stuff to figure out if we're running with MPI
# and if so, on how many ranks.
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_ranks = comm.Get_size()
except ImportError:
print_warning("missing mpi4py, running in serial (non-distributed) mode")
rank = 0
num_ranks = 1
reader = create_reader(files)
timesteps = reader.TimestepValues[:]
step = 0
numsteps = len(timesteps)
for time in timesteps:
step += 1
        if args.delay > 0:
            # import under an alias so the loop variable "time" is not clobbered
            import time as _time
            _time.sleep(args.delay)
if rank == 0:
print_info("timestep: {0}/{1}".format(step, numsteps))
dataset, wholeExtent = read_dataset(reader, time, rank, num_ranks)
# "perform" coprocessing. results are outputted only if
# the passed in script says we should at time/step
bridge.coprocess(time, step, dataset, name=args.channel, wholeExtent=wholeExtent)
del dataset
del wholeExtent
# finalize Catalyst
bridge.finalize()
if __name__ == "__main__":
args = parser.parse_args()
main(args)
``` |
{
"source": "a1846342933/px4framework",
"score": 2
} |
#### File: sitl_gazebo/scripts/xacro.py
```python
from __future__ import print_function
import getopt
import glob
import os
import re
import string
import sys
import xml
from xml.dom.minidom import parse
import substitution_args
from names import load_mappings
try:
_basestr = basestring
except NameError:
_basestr = str
# Dictionary of subtitution args
substitution_args_context = {}
class XacroException(Exception):
pass
def isnumber(x):
return hasattr(x, '__int__')
def eval_extension(str):
return substitution_args.resolve_args(
str, context=substitution_args_context, resolve_anon=False)
# Better pretty printing of xml
# Taken from
# http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 \
and self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s" % (newl))
for node in self.childNodes:
# skip whitespace-only text nodes
if node.nodeType == xml.dom.minidom.Node.TEXT_NODE and \
not node.data.strip():
continue
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
# replace minidom's function with ours
xml.dom.minidom.Element.writexml = fixed_writexml
class Table:
def __init__(self, parent=None):
self.parent = parent
self.table = {}
def __getitem__(self, key):
if key in self.table:
return self.table[key]
elif self.parent:
return self.parent[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.table[key] = value
def __contains__(self, key):
return \
key in self.table or \
(self.parent and key in self.parent)
class QuickLexer(object):
def __init__(self, **res):
self.str = ""
self.top = None
self.res = []
for k, v in res.items():
self.__setattr__(k, len(self.res))
self.res.append(v)
def lex(self, str):
self.str = str
self.top = None
self.next()
def peek(self):
return self.top
def next(self):
result = self.top
self.top = None
for i in range(len(self.res)):
m = re.match(self.res[i], self.str)
if m:
self.top = (i, m.group(0))
self.str = self.str[m.end():]
break
return result
def first_child_element(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
def next_sibling_element(elt):
c = elt.nextSibling
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
# Pre-order traversal of the elements
def next_element(elt):
child = first_child_element(elt)
if child:
return child
while elt and elt.nodeType == xml.dom.Node.ELEMENT_NODE:
next = next_sibling_element(elt)
if next:
return next
elt = elt.parentNode
return None
# Pre-order traversal of all the nodes
def next_node(node):
if node.firstChild:
return node.firstChild
while node:
if node.nextSibling:
return node.nextSibling
node = node.parentNode
return None
def child_nodes(elt):
c = elt.firstChild
while c:
yield c
c = c.nextSibling
all_includes = []
# Deprecated message for <include> tags that don't have <xacro:include>
# prepended:
deprecated_include_msg = """DEPRECATED IN HYDRO:
The <include> tag should be prepended with 'xacro' if that is the intended use
of it, such as <xacro:include ...>. Use the following script to fix incorrect
xacro includes:
sed -i 's/<include/<xacro:include/g' `find . -iname *.xacro`"""
include_no_matches_msg = """Include tag filename spec \"{}\" matched no files."""
# @throws XacroException if a parsing error occurs with an included document
def process_includes(doc, base_dir):
namespaces = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
# Xacro should not use plain 'include' tags but only namespaced ones. Causes conflicts with
# other XML elements including Gazebo's <gazebo> extensions
is_include = False
if elt.tagName == 'xacro:include' or elt.tagName == 'include':
is_include = True
# Temporary fix for ROS Hydro and the xacro include scope problem
if elt.tagName == 'include':
# check if there is any element within the <include> tag. mostly we are concerned
# with Gazebo's <uri> element, but it could be anything. also, make sure the child
# nodes aren't just a single Text node, which is still considered a deprecated
# instance
if elt.childNodes and not (len(elt.childNodes) == 1 and
elt.childNodes[0].nodeType == elt.TEXT_NODE):
# this is not intended to be a xacro element, so we can
# ignore it
is_include = False
else:
# throw a deprecated warning
print(deprecated_include_msg, file=sys.stderr)
# Process current element depending on previous conditions
if is_include:
filename_spec = eval_text(elt.getAttribute('filename'), {})
if not os.path.isabs(filename_spec):
filename_spec = os.path.join(base_dir, filename_spec)
if re.search('[*[?]+', filename_spec):
# Globbing behaviour
filenames = sorted(glob.glob(filename_spec))
if len(filenames) == 0:
print(include_no_matches_msg.format(
filename_spec), file=sys.stderr)
else:
# Default behaviour
filenames = [filename_spec]
for filename in filenames:
global all_includes
all_includes.append(filename)
try:
with open(filename) as f:
try:
included = parse(f)
except Exception as e:
raise XacroException(
"included file \"%s\" generated an error during XML parsing: %s" %
(filename, str(e)))
except IOError as e:
raise XacroException(
"included file \"%s\" could not be opened: %s" %
(filename, str(e)))
# Replaces the include tag with the elements of the included
# file
for c in child_nodes(included.documentElement):
elt.parentNode.insertBefore(c.cloneNode(deep=True), elt)
# Grabs all the declared namespaces of the included document
for name, value in included.documentElement.attributes.items():
if name.startswith('xmlns:'):
namespaces[name] = value
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
# Makes sure the final document declares all the namespaces of the
# included documents.
for k, v in namespaces.items():
doc.documentElement.setAttribute(k, v)
# Returns a dictionary: { macro_name => macro_xml_block }
def grab_macros(doc):
macros = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'macro' or elt.tagName == 'xacro:macro':
name = elt.getAttribute('name')
macros[name] = elt
macros['xacro:' + name] = elt
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return macros
# Returns a Table of the properties
def grab_properties(doc):
table = Table()
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'property' or elt.tagName == 'xacro:property':
name = elt.getAttribute('name')
value = None
if elt.hasAttribute('value'):
value = elt.getAttribute('value')
else:
name = '**' + name
value = elt # debug
bad = string.whitespace + "${}"
has_bad = False
for b in bad:
if b in name:
has_bad = True
break
if has_bad:
sys.stderr.write('Property names may not have whitespace, ' +
'"{", "}", or "$" : "' + name + '"')
else:
table[name] = value
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return table
def eat_ignore(lex):
while lex.peek() and lex.peek()[0] == lex.IGNORE:
lex.next()
def eval_lit(lex, symbols):
eat_ignore(lex)
if lex.peek()[0] == lex.NUMBER:
return float(lex.next()[1])
if lex.peek()[0] == lex.SYMBOL:
try:
key = lex.next()[1]
value = symbols[key]
except KeyError as ex:
raise XacroException("Property wasn't defined: %s" % str(ex))
if not (isnumber(value) or isinstance(value, _basestr)):
if value is None:
raise XacroException("Property %s recursively used" % key)
            raise XacroException("Invalid value for property '%s': %s" % (key, str(value)))
try:
return int(value)
except BaseException:
try:
return float(value)
except BaseException:
# prevent infinite recursion
symbols[key] = None
result = eval_text(value, symbols)
# restore old entry
symbols[key] = value
return result
raise XacroException("Bad literal")
def eval_factor(lex, symbols):
eat_ignore(lex)
neg = 1
if lex.peek()[1] == '-':
lex.next()
neg = -1
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL]:
return neg * eval_lit(lex, symbols)
if lex.peek()[0] == lex.LPAREN:
lex.next()
eat_ignore(lex)
result = eval_expr(lex, symbols)
eat_ignore(lex)
if lex.next()[0] != lex.RPAREN:
raise XacroException("Unmatched left paren")
eat_ignore(lex)
return neg * result
raise XacroException("Misplaced operator")
def eval_term(lex, symbols):
eat_ignore(lex)
result = 0
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL, lex.LPAREN] \
or lex.peek()[1] == '-':
result = eval_factor(lex, symbols)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['*', '/']:
op = lex.next()[1]
n = eval_factor(lex, symbols)
if op == '*':
result = float(result) * float(n)
elif op == '/':
result = float(result) / float(n)
else:
                raise XacroException("Invalid operator; expected '*' or '/'")
eat_ignore(lex)
return result
def eval_expr(lex, symbols):
eat_ignore(lex)
op = None
if lex.peek()[0] == lex.OP:
op = lex.next()[1]
if op not in ['+', '-']:
raise XacroException("Invalid operation. Must be '+' or '-'")
result = eval_term(lex, symbols)
if op == '-':
result = -float(result)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['+', '-']:
op = lex.next()[1]
n = eval_term(lex, symbols)
if op == '+':
result = float(result) + float(n)
if op == '-':
result = float(result) - float(n)
eat_ignore(lex)
return result
def eval_text(text, symbols):
def handle_expr(s):
lex = QuickLexer(IGNORE=r"\s+",
NUMBER=r"(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?",
SYMBOL=r"[a-zA-Z_]\w*",
OP=r"[\+\-\*/^]",
LPAREN=r"\(",
RPAREN=r"\)")
lex.lex(s)
return eval_expr(lex, symbols)
def handle_extension(s):
return eval_extension("$(%s)" % s)
results = []
lex = QuickLexer(DOLLAR_DOLLAR_BRACE=r"\$\$+\{",
EXPR=r"\$\{[^\}]*\}",
EXTENSION=r"\$\([^\)]*\)",
TEXT=r"([^\$]|\$[^{(]|\$$)+")
lex.lex(text)
while lex.peek():
if lex.peek()[0] == lex.EXPR:
results.append(handle_expr(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.EXTENSION:
results.append(handle_extension(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.TEXT:
results.append(lex.next()[1])
elif lex.peek()[0] == lex.DOLLAR_DOLLAR_BRACE:
results.append(lex.next()[1][1:])
return ''.join(map(str, results))
# Expands macros, replaces properties, and evaluates expressions
def eval_all(root, macros, symbols):
# Evaluates the attributes for the root node
for at in root.attributes.items():
result = eval_text(at[1], symbols)
root.setAttribute(at[0], result)
previous = root
node = next_node(previous)
while node:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
if node.tagName in macros:
body = macros[node.tagName].cloneNode(deep=True)
params = body.getAttribute('params').split()
# Parse default values for any parameters
defaultmap = {}
for param in params[:]:
splitParam = param.split(':=')
if len(splitParam) == 2:
defaultmap[splitParam[0]] = splitParam[1]
params.remove(param)
params.append(splitParam[0])
elif len(splitParam) != 1:
raise XacroException("Invalid parameter definition")
# Expands the macro
scoped = Table(symbols)
for name, value in node.attributes.items():
if name not in params:
raise XacroException(
"Invalid parameter \"%s\" while expanding macro \"%s\"" %
(str(name), str(
node.tagName)))
params.remove(name)
scoped[name] = eval_text(value, symbols)
# Pulls out the block arguments, in order
cloned = node.cloneNode(deep=True)
eval_all(cloned, macros, symbols)
block = cloned.firstChild
for param in params[:]:
if param[0] == '*':
while block and block.nodeType != xml.dom.Node.ELEMENT_NODE:
block = block.nextSibling
if not block:
raise XacroException(
"Not enough blocks while evaluating macro %s" % str(
node.tagName))
params.remove(param)
scoped[param] = block
block = block.nextSibling
# Try to load defaults for any remaining non-block parameters
for param in params[:]:
if param[0] != '*' and param in defaultmap:
scoped[param] = defaultmap[param]
params.remove(param)
if params:
raise XacroException(
"Parameters [%s] were not set for macro %s" %
(",".join(params), str(
node.tagName)))
eval_all(body, macros, scoped)
# Replaces the macro node with the expansion
for e in list(child_nodes(body)): # Ew
node.parentNode.insertBefore(e, node)
node.parentNode.removeChild(node)
node = None
elif node.tagName == 'xacro:arg':
name = node.getAttribute('name')
if not name:
raise XacroException("Argument name missing")
default = node.getAttribute('default')
if default and name not in substitution_args_context['arg']:
substitution_args_context['arg'][name] = default
node.parentNode.removeChild(node)
node = None
elif node.tagName == 'insert_block' or node.tagName == 'xacro:insert_block':
name = node.getAttribute('name')
if ("**" + name) in symbols:
# Multi-block
block = symbols['**' + name]
for e in list(child_nodes(block)):
node.parentNode.insertBefore(
e.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
elif ("*" + name) in symbols:
# Single block
block = symbols['*' + name]
node.parentNode.insertBefore(
block.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
else:
raise XacroException(
"Block \"%s\" was never declared" % name)
node = None
elif node.tagName in ['if', 'xacro:if', 'unless', 'xacro:unless']:
value = eval_text(node.getAttribute('value'), symbols)
try:
if value == 'true':
keep = True
elif value == 'false':
keep = False
else:
keep = float(value)
except ValueError:
raise XacroException(
"Xacro conditional evaluated to \"%s\". Acceptable evaluations are one of [\"1\",\"true\",\"0\",\"false\"]" %
value)
if node.tagName in ['unless', 'xacro:unless']:
keep = not keep
if keep:
for e in list(child_nodes(node)):
node.parentNode.insertBefore(
e.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
else:
# Evals the attributes
for at in node.attributes.items():
result = eval_text(at[1], symbols)
node.setAttribute(at[0], result)
previous = node
elif node.nodeType == xml.dom.Node.TEXT_NODE:
node.data = eval_text(node.data, symbols)
previous = node
else:
previous = node
node = next_node(previous)
return macros
# Expands everything except includes
def eval_self_contained(doc):
macros = grab_macros(doc)
symbols = grab_properties(doc)
eval_all(doc.documentElement, macros, symbols)
def print_usage(exit_code=0):
print("Usage: %s [-o <output>] <input>" % 'xacro.py')
print(" %s --deps Prints dependencies" % 'xacro.py')
    print("       %s --includes Only evaluates includes" % 'xacro.py')
sys.exit(exit_code)
def set_substitution_args_context(context={}):
substitution_args_context['arg'] = context
def open_output(output_filename):
if output_filename is None:
return sys.stdout
else:
return open(output_filename, 'w')
def main():
try:
opts, args = getopt.gnu_getopt(
sys.argv[1:], "ho:", ['deps', 'includes'])
except getopt.GetoptError as err:
print(str(err))
print_usage(2)
just_deps = False
just_includes = False
output_filename = None
for o, a in opts:
if o == '-h':
print_usage(0)
elif o == '-o':
output_filename = a
elif o == '--deps':
just_deps = True
elif o == '--includes':
just_includes = True
if len(args) < 1:
print("No input given")
print_usage(2)
# Process substitution args
set_substitution_args_context(load_mappings(sys.argv))
f = open(args[0])
doc = None
try:
doc = parse(f)
except xml.parsers.expat.ExpatError:
sys.stderr.write("Expat parsing error. Check that:\n")
sys.stderr.write(" - Your XML is correctly formed\n")
sys.stderr.write(" - You have the xacro xmlns declaration: " +
"xmlns:xacro=\"http://www.ros.org/wiki/xacro\"\n")
sys.stderr.write("\n")
raise
finally:
f.close()
process_includes(doc, os.path.dirname(args[0]))
if just_deps:
for inc in all_includes:
sys.stdout.write(inc + " ")
sys.stdout.write("\n")
elif just_includes:
doc.writexml(open_output(output_filename))
print()
else:
eval_self_contained(doc)
banner = [
xml.dom.minidom.Comment(c) for c in [
" %s " % ('=' * 83),
" | This document was autogenerated by xacro from %-30s | " % args[0],
" | EDITING THIS FILE BY HAND IS NOT RECOMMENDED %-30s | " % "",
" %s " % ('=' * 83)]]
first = doc.firstChild
for comment in banner:
doc.insertBefore(comment, first)
open_output(output_filename).write(doc.toprettyxml(indent=' '))
print()
if __name__ == '__main__':
    main()
``` |
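For a quick feel of the expression machinery above, here is a small sketch that calls `eval_text` directly with a plain dict standing in for the property table; property values are strings, just as `grab_properties` would store them. It assumes this file is importable as `xacro` (the guarded `main()` call above allows that) and that the sibling `substitution_args` and `names` modules are on the path.
```python
import xacro  # assumes this script's directory is on the Python path

symbols = {"radius": "0.5", "scale": "3"}
print(xacro.eval_text("diameter is ${2 * radius}", symbols))  # -> diameter is 1.0
print(xacro.eval_text("${scale * (radius + 1)}", symbols))    # -> 4.5
```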
{
"source": "a1852rw/system_propraming",
"score": 4
} |
#### File: system_propraming/exam_001/q004_verb_post.py
```python
def verb_post(s):
irregular = {
"go": "went",
"put": "put",
"write": "wrote",
"find": "found",
"read": "read",
}
if s in irregular:
return irregular[s]
if s[-1] == "c":
return s + "ked"
if s[-1] == "e":
return s + "d"
if s[-1] == "y" and not s[-2] in ["a", "i", "u", "e", "o"]:
return s[:-1] + "ied"
return s + "ed"
print(verb_post("play"))
print(verb_post("like"))
print(verb_post("try"))
print(verb_post("picnic"))
print(verb_post("write"))
print(verb_post("go"))
print(verb_post("read"))
```
#### File: system_propraming/lesson_001/012_reverse_002.py
```python
def reserve(s):
return s[::-1]
# step backwards through the string stored in argument s, taking one character at a time
orig = "good"
result = reserve(orig)
print(result)
# Requirement 1: write a function "reverse" that reverses a string
# Output: doog
```
#### File: system_propraming/lesson_001/013_reverse_003.py
```python
def reserve(s):
return str(s[::-1])
orig = input("Type a phrase: ")
result = reserve(orig)
if orig == result:
print("** palindrome **")
    # the string reads the same before and after reversal, i.e. it is a palindrome
elif orig != result:
print(result)
# Requirement 1: write a reverse function
# Requirement 2: if the string read from the keyboard is a palindrome, print "** palindrome **"
# Requirement 3: if the string read from the keyboard is not a palindrome, print the reversed string
# Input 1: alice
# Output 1: ecila
# Input 2: anna
# Output 2: ** palindrome **
```
#### File: system_propraming/lesson_003/001_fix_first.py
```python
def fix_first(s):
s1 = s[:1]
s2 = s[1:]
return s1 + s2.replace(s1, "*")
print(fix_first("babble"))
print(fix_first("google"))
print(fix_first("apple"))
print(fix_first("orange"))
# Requirement 1: write the fix_first function
# Requirement 2: take a string and replace every occurrence of its first character with '*' before returning it
# Requirement 3: leave the first character itself unchanged
# Output 1: ba**le
# Output 2: goo*le
# Output 3: apple
# Output 4: orange
```
#### File: system_propraming/lesson_003/007_linear_merge.py
```python
def linear_merge(li1,li2):
li3 = li1 + li2
    # assign the concatenation of li1 and li2 to li3 (lists can be joined with '+')
    return sorted(li3)
    # return the elements of li3 sorted in ascending order
print(linear_merge(['aa','xx','zz'],['bb','cc']))
print(linear_merge(['aa','xx'],['bb','cc','zz']))
print(linear_merge(['aa','aa'],['aa','bb','bb']))
# Requirement 1: write the linear_merge function
# Requirement 2: it receives two lists sorted in ascending (smallest-first) order
# Requirement 3: merge them and return a list whose elements are sorted in ascending order
# Output 1: ['aa','bb','cc','xx','zz']
# Output 2: ['aa','bb','cc','xx','zz']
# Output 3: ['aa','aa','aa','bb','bb']
``` |
{
"source": "a18792721831/StudyPython",
"score": 2
} |
#### File: web/handlers/LoginHandler.py
```python
import tornado.web
class LoginHandler(tornado.web.RequestHandler):
def get(self):
print('get')
def post(self):
print('post')
def put(self):
print('put')
def delete(self):
print('delete')
```
#### File: web/handlers/StaticsHandler.py
```python
import tornado.web
class StaticsHandler(tornado.web.RequestHandler):
def get(self):
self.render('../statics/' + self.get_argument('path'))
def post(self):
self.get()
def put(self):
self.get()
def delete(self):
self.get()
``` |
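These handlers only define the HTTP verb methods; a minimal, hedged sketch of how they might be wired into a Tornado application is shown below. The routes and port are made up for illustration, and the import paths are assumed from the repository layout.
```python
import tornado.ioloop
import tornado.web

from web.handlers.LoginHandler import LoginHandler      # path assumed from the repo layout
from web.handlers.StaticsHandler import StaticsHandler  # path assumed from the repo layout

app = tornado.web.Application([
    (r"/login", LoginHandler),      # hypothetical route
    (r"/statics", StaticsHandler),  # hypothetical route, expects ?path=<template file>
])
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
```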
{
"source": "a18antsv/Python-Two-Week-Challenge",
"score": 4
} |
#### File: Python-Two-Week-Challenge/day-3/main.py
```python
def add_to_dict(a_dict = {}, key = None, value = None):
if type(a_dict) is not dict:
        print(f"You need to send a dictionary. You sent: {type(a_dict)}")
elif key is None or value is None:
print("You need to send a word and a definition.")
elif key in a_dict:
print(f"{key} is already on the dictionary. Won't add.")
else:
a_dict[key] = value
print(f"{key} has been added.")
def get_from_dict(a_dict = {}, key = None):
if type(a_dict) is not dict:
        print(f"You need to send a dictionary. You sent: {type(a_dict)}")
elif key is None:
print("You need to send a word to search for.")
elif key not in a_dict:
print(f"{key} was not found in this dict.")
else:
print(f"{key}: {a_dict[key]}")
def update_word(a_dict = {}, key = None, value = None):
if type(a_dict) is not dict:
        print(f"You need to send a dictionary. You sent: {type(a_dict)}")
elif key is None or value is None:
print("You need to send a word and a definition to update.")
elif key not in a_dict:
print(f"{key} is not on the dict. Can't update non-existing word.")
else:
a_dict[key] = value
print(f"{key} has been updated to: {value}")
def delete_from_dict(a_dict = {}, key = None):
if type(a_dict) is not dict:
        print(f"You need to send a dictionary. You sent: {type(a_dict)}")
elif key is None:
print("You need to specify a word to delete.")
elif key not in a_dict:
print(f"{key} is not in this dict. Won't delete.")
else:
del a_dict[key]
print(f"{key} has been deleted.")
# \/\/\/\/\/\/\ DO NOT TOUCH \/\/\/\/\/\/\
import os
os.system('clear')
my_english_dict = {}
print("\n###### add_to_dict ######\n")
# Should not work. First argument should be a dict.
print('add_to_dict("hello", "kimchi"):')
add_to_dict("hello", "kimchi")
# Should not work. Definition is required.
print('\nadd_to_dict(my_english_dict, "kimchi"):')
add_to_dict(my_english_dict, "kimchi")
# Should work.
print('\nadd_to_dict(my_english_dict, "kimchi", "The source of life."):')
add_to_dict(my_english_dict, "kimchi", "The source of life.")
# Should not work. kimchi is already on the dict
print('\nadd_to_dict(my_english_dict, "kimchi", "My fav. food"):')
add_to_dict(my_english_dict, "kimchi", "My fav. food")
print("\n\n###### get_from_dict ######\n")
# Should not work. First argument should be a dict.
print('\nget_from_dict("hello", "kimchi"):')
get_from_dict("hello", "kimchi")
# Should not work. Word to search from is required.
print('\nget_from_dict(my_english_dict):')
get_from_dict(my_english_dict)
# Should not work. Word is not found.
print('\nget_from_dict(my_english_dict, "galbi"):')
get_from_dict(my_english_dict, "galbi")
# Should work and print the definition of 'kimchi'
print('\nget_from_dict(my_english_dict, "kimchi"):')
get_from_dict(my_english_dict, "kimchi")
print("\n\n###### update_word ######\n")
# Should not work. First argument should be a dict.
print('\nupdate_word("hello", "kimchi"):')
update_word("hello", "kimchi")
# Should not work. Word and definiton are required.
print('\nupdate_word(my_english_dict, "kimchi"):')
update_word(my_english_dict, "kimchi")
# Should not work. Word not found.
print('\nupdate_word(my_english_dict, "galbi", "Love it."):')
update_word(my_english_dict, "galbi", "Love it.")
# Should work.
print('\nupdate_word(my_english_dict, "kimchi", "Food from the gods."):')
update_word(my_english_dict, "kimchi", "Food from the gods.")
# Check the new value.
print('\nget_from_dict(my_english_dict, "kimchi"):')
get_from_dict(my_english_dict, "kimchi")
print("\n\n###### delete_from_dict ######\n")
# Should not work. First argument should be a dict.
print('\ndelete_from_dict("hello", "kimchi"):')
delete_from_dict("hello", "kimchi")
# Should not work. Word to delete is required.
print('\ndelete_from_dict(my_english_dict):')
delete_from_dict(my_english_dict)
# Should not work. Word not found.
print('\ndelete_from_dict(my_english_dict, "galbi"):')
delete_from_dict(my_english_dict, "galbi")
# Should work.
print('\ndelete_from_dict(my_english_dict, "kimchi"):')
delete_from_dict(my_english_dict, "kimchi")
# Check that it does not exist
print('\nget_from_dict(my_english_dict, "kimchi"):')
get_from_dict(my_english_dict, "kimchi")
# \/\/\/\/\/\/\ END DO NOT TOUCH \/\/\/\/\/\/\
```
#### File: Python-Two-Week-Challenge/day-9&10/main.py
```python
import requests
from flask import Flask, render_template, request, redirect
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a story by id.
# Here's the documentation: https://hn.algolia.com/api
def make_detail_url(id):
return f"{base_url}/items/{id}"
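# Naive in-memory cache: fetched posts are stored per order key ('new' / 'popular')
# so the Algolia API is hit at most once per ordering.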
db = {}
app = Flask("DayNine")
@app.route("/")
def index():
allowed_orders = ("popular", "new")
order_by = request.args.get("order_by")
if order_by:
order_by = order_by.lower()
if order_by not in allowed_orders:
order_by = allowed_orders[0]
posts_from_db = db.get(order_by)
if posts_from_db:
posts = posts_from_db
else:
posts = requests.get(globals()[order_by]).json()["hits"]
db[order_by] = posts
return render_template("index.html", order_by=order_by, posts=posts)
@app.route("/<id>")
def detail(id):
    try:
        # use a distinct name so we don't shadow flask's `request` imported above
        response = requests.get(make_detail_url(id))
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        return redirect("/")
    post = response.json()
return render_template(
"detail.html",
title=post.get("title"),
url=post.get("url"),
points=post.get("points"),
author=post.get("author"),
comments=post.get("children")
)
app.run(host="0.0.0.0")
``` |
{
"source": "a195297/iEntropy",
"score": 3
} |
#### File: a195297/iEntropy/draw.py
```python
from ase.visualize import view
from ase import Atoms
def drawing(file_path):
    f = open(file_path, 'r')  # open the file for reading
    lines = f.readlines()  # read every line of the file into a list of strings
    atom_num = len(lines)  # number of atoms
    print("Atom_numbers: ", atom_num)
    for i in range(atom_num):  # strip trailing newline characters
        lines[i] = lines[i].replace('\n', '').replace('\r', '')
    f.close()  # close the file
Cell=[30, 30, 30]
name_list = []
position_list = []
for i in range(atom_num):
        lines[i] = lines[i].split(' ')  # now each lines[i] is a list of 4 strings
#print(lines[i], end = '')
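        # scale the (presumably fractional) coordinates by the 30-unit cubic cell defined above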
x = float(lines[i][1])*30
y = float(lines[i][2])*30
z = float(lines[i][3])*30
name_list.append(lines[i][0])
position_list.append((x,y,z))
All_atoms = Atoms(name_list, positions=position_list, cell=Cell)
view(All_atoms)
```
#### File: a195297/iEntropy/rndalloysgenerator3D_rev2.py
```python
import random
import numpy as np
import math
# mode=1:bcc / mode=2: fcc
def calculation(atom1,atom2,atom3,atom4,atom5,cpst1,cpst2,cpst3,cpst4,cpst5,mode,L,calentropy,vacancy,vacanpropor,seedingnumber,filename):
entropy = 0
vacanporporreduce=vacanpropor/100
if mode == 1:
# parameters setting
        # only odd numbers are valid in this code
randq = 0 # 0 to 1;
eventnumber=5 # numbers of atom
cpstt=[cpst1, cpst2, cpst3, cpst4, cpst5]
token1=1
token2=2
token3=3
token4=4
token5=5
#====calculation====
random.seed(seedingnumber)
# re-distribution composition
rcpst = []
for i in range(eventnumber):
rcpst.append(cpstt[i]/sum(cpstt))
an_real=L**2*(L+1)/2+(L-1)**2*(L-1)/2
# 3-D space matrix
amap_odd=np.zeros((L,L,L)) # odd layer
amap_even=np.zeros((L-1,L-1,L-1)) # even layer
rmap_odd=np.random.random((L,L,L))
rmap_even=np.random.random((L-1,L-1,L-1))
x=0
y=0
z=0
# i,j,k distribution mode 1, naturally random
if mode == 1:
atom1p=[]
atom2p=[]
atom3p=[]
atom4p=[]
atom5p=[]
# odd layer loop
for i in range(L):
for j in range(L):
for k in range(L):
if rmap_odd[i,j,k] >= 0 and rmap_odd[i,j,k] < sum(rcpst[0:1]): # re-distribute 1
amap_odd[i,j,k]=token1
# deciding first atom position
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom1p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:1]) and rmap_odd[i,j,k] < sum(rcpst[0:2]): # re-dist2
amap_odd[i,j,k]=token2
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom2p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:2]) and rmap_odd[i,j,k] < sum(rcpst[0:3]): # re-dist3
amap_odd[i,j,k]=token3
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom3p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:3]) and rmap_odd[i,j,k] < sum(rcpst[0:4]): # re-dist4
amap_odd[i,j,k]=token4
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom4p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:4]) and rmap_odd[i,j,k] <= sum(rcpst[0:5]): # re-dist5
amap_odd[i,j,k]=token5
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom5p.append([x, y, z])
else:
print(i)
print(j)
print(k)
for i in range(L-1):
for j in range(L-1):
for k in range((L-1)):
if rmap_even[i,j,k] >= 0 and rmap_even[i,j,k] < sum(rcpst[0:1]): #re-distribute 1
amap_even[i,j,k]=token1
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k+1/2)/(L-1)
atom1p.append([x, y, z])
#%deciding first atom position
elif rmap_even[i,j,k] >= sum(rcpst[0:1]) and rmap_even[i,j,k] <sum(rcpst[0:2]): #re-dist2
amap_even[i,j,k]=token2
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k+1/2)/(L-1)
atom2p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:2]) and rmap_even[i,j,k] <sum(rcpst[0:3]): #re-dist3
amap_even[i,j,k]=token3
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k+1/2)/(L-1)
atom3p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:3]) and rmap_even[i,j,k] <sum(rcpst[0:4]): #re-dist4
amap_even[i,j,k]=token4
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k+1/2)/(L-1)
atom4p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:4]) and rmap_even[i,j,k] <= sum(rcpst[0:5]): #re-dist5
amap_even[i,j,k]=token5
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k+1/2)/(L-1)
atom5p.append([x, y, z])
else:
print(i)
print(j)
print(k)
# position saving
# k=1 -> z=0's plane and growing toward top
        # i,j,k distribution conditionally random, based on the randomness parameter
B=[]
if calentropy == 1:
count=0
for i in range(eventnumber):
for j in range(eventnumber):
for k in range(eventnumber):
for l in range(eventnumber):
B.append([[i, j], [k, l]]) # define state
count=count+1;
            # calculate the probability of each state
P=np.zeros(len(B))
amap_odd_e=amap_odd
amap_even_e=amap_even
amap_odd_e=amap_odd
amap_even_e=amap_even
amap_odd_e=np.concatenate((amap_odd_e, [amap_odd_e[0]]), axis=0)
a=np.split(amap_odd_e,[1],axis=1)
amap_odd_e=np.concatenate((amap_odd_e, a[0]), axis=1)
amap_even_e=np.concatenate((amap_even_e, [amap_even_e[0]]), axis=0)
b=np.split(amap_even_e,[1],axis=1)
amap_even_e=np.concatenate((amap_even_e, b[0]), axis=1)
# odd
for i in range(L-1):
for j in range(L-1):
for k in range(L):
for kkk in range(len(B)):
posi=[[amap_odd_e[i,j,k], amap_odd_e[i+1,j,k]], [amap_odd_e[i,j+1,k], amap_odd_e[i+1,j+1,k]]]
#check=posi==B[kkk]
if posi==B[kkk]:
P[kkk]=P[kkk]+1
else:
P[kkk]=P[kkk]+0
for i in range(L-2):
for j in range(L-2):
for k in range(L-1):
for kkk in range(len(B)):
posi=[[amap_even_e[i,j,k], amap_even_e[i+1,j,k]], [amap_even_e[i,j+1,k], amap_even_e[i+1,j+1,k]]]
if posi==B[kkk]:
P[kkk]=P[kkk]+1
else:
P[kkk]=P[kkk]+0
P=P/sum(P) #/length(B)
entropy=0
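            # Shannon entropy (base 2) of the observed 2x2 neighbourhood-state distribution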
for i in range(len(B)):
if P[i]!=0:
entropy=entropy+ -P[i]*math.log(P[i], 2)
print(entropy)
an_eval=len(atom1p)+len(atom2p)+len(atom3p)+len(atom4p)+len(atom5p)
print(an_eval)
        # ###output###
        # fid = open(filename, 'w') # path
        # for i in range(len(atom1p)):
        #     fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom1, atom1p[i][0], atom1p[i][1], atom1p[i][2]))  # write column by column; for row-wise output use: fprintf(fid,'%.4\t',A(jj))
        # for i in range(len(atom2p)):
        #     fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom2, atom2p[i][0], atom2p[i][1], atom2p[i][2]))  # write column by column; for row-wise output use: fprintf(fid,'%.4\t',A(jj))
        # for i in range(len(atom3p)):
        #     fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom3, atom3p[i][0], atom3p[i][1], atom3p[i][2]))  # write column by column; for row-wise output use: fprintf(fid,'%.4\t',A(jj))
        # for i in range(len(atom4p)):
        #     fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom4, atom4p[i][0], atom4p[i][1], atom4p[i][2]))  # write column by column; for row-wise output use: fprintf(fid,'%.4\t',A(jj))
        # for i in range(len(atom5p)):
        #     fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom5, atom5p[i][0], atom5p[i][1], atom5p[i][2]))  # write column by column; for row-wise output use: fprintf(fid,'%.4\t',A(jj))
        # fid.close()
elif mode == 2:
# for fcc
randq = 0 # 0 to 1;
seedingnumber=126
eventnumber=5 # numbers of atom
cpstt=[cpst1, cpst2, cpst3, cpst4, cpst5]
token1=1
token2=2
token3=3
token4=4
token5=5
mode=1
#====calculation====
random.seed(seedingnumber)
# re-distribution composition
rcpst = []
for i in range(eventnumber):
rcpst.append(cpstt[i]/sum(cpstt))
an_real=L**2*(L+1)/2+(L-1)**2*(L-1)/2
# 3-D space matrix
amap_odd=np.zeros((L,L,L)) # odd layer 1
amap_even=np.zeros((L-1,L-1,L)) # odd layer 2
amap_2=np.zeros((2*L-1,2*L-1,L-1)) #fcc even
rmap_odd=np.random.random((L,L,L))
rmap_even=np.random.random((L-1,L-1,L))
rmap_2=np.random.random((2*L-1,2*L-1,L-1))
x=0
y=0
z=0
for i in range(0, 2*L-1, 2):
for j in range(0, 2*L-1, 2):
rmap_2[i,j,:]=-5
for i in range(1, 2*L-2, 2):
for j in range(1, 2*L-2, 2):
rmap_2[i,j,:]=-5
# i,j,k distribution mode 1, naturally random
atom1p=[]
atom2p=[]
atom3p=[]
atom4p=[]
atom5p=[]
# odd layer loop
for i in range(L):
for j in range(L):
for k in range(L):
if rmap_odd[i,j,k] >= 0 and rmap_odd[i,j,k] < sum(rcpst[0:1]): # re-distribute 1
amap_odd[i,j,k]=token1
# deciding first atom position
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom1p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:1]) and rmap_odd[i,j,k] < sum(rcpst[0:2]): # re-dist2
amap_odd[i,j,k]=token2
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom2p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:2]) and rmap_odd[i,j,k] < sum(rcpst[0:3]): # re-dist3
amap_odd[i,j,k]=token3
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom3p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:3]) and rmap_odd[i,j,k] < sum(rcpst[0:4]): # re-dist4
amap_odd[i,j,k]=token4
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom4p.append([x, y, z])
elif rmap_odd[i,j,k] >= sum(rcpst[0:4]) and rmap_odd[i,j,k] <= sum(rcpst[0:5]): # re-dist5
amap_odd[i,j,k]=token5
x=(i)/(L-1)
y=(j)/(L-1)
z=(k)/(L-1)
atom5p.append([x, y, z])
else:
print(i)
print(j)
print(k)
# even layer loop
for i in range(L-1):
for j in range(L-1):
for k in range(L):
if rmap_even[i,j,k] >= 0 and rmap_even[i,j,k] < sum(rcpst[0:1]): #re-distribute 1
amap_even[i,j,k]=token1
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k)/(L-1)
atom1p.append([x, y, z])
#%deciding first atom position
elif rmap_even[i,j,k] >= sum(rcpst[0:1]) and rmap_even[i,j,k] <sum(rcpst[0:2]): #re-dist2
amap_even[i,j,k]=token2
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k)/(L-1)
atom2p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:2]) and rmap_even[i,j,k] <sum(rcpst[0:3]): #re-dist3
amap_even[i,j,k]=token3
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k)/(L-1)
atom3p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:3]) and rmap_even[i,j,k] <sum(rcpst[0:4]): #re-dist4
amap_even[i,j,k]=token4
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k)/(L-1)
atom4p.append([x, y, z])
elif rmap_even[i,j,k] >= sum(rcpst[0:4]) and rmap_even[i,j,k] <= sum(rcpst[0:5]): #re-dist5
amap_even[i,j,k]=token5
x=(i+1/2)/(L-1)
y=(j+1/2)/(L-1)
z=(k)/(L-1)
atom5p.append([x, y, z])
else:
print(i)
print(j)
print(k)
# fcc
for i in range(2*L-1):
for j in range(2*L-1):
for k in range(L-1):
if rmap_2[i,j,k] >= 0 and rmap_2[i,j,k] < sum(rcpst[0:1]): #re-distribute 1
amap_2[i,j,k]=token1
x=(i)/(2*L-2)
y=(j)/(2*L-2)
z=(k+1/2)/(L-1)
atom1p.append([x, y, z])
#%deciding first atom position
elif rmap_2[i,j,k] >= sum(rcpst[0:1]) and rmap_2[i,j,k] <sum(rcpst[0:2]): #re-dist2
amap_2[i,j,k]=token2
x=(i)/(2*L-2)
y=(j)/(2*L-2)
z=(k+1/2)/(L-1)
atom2p.append([x, y, z])
elif rmap_2[i,j,k] >= sum(rcpst[0:2]) and rmap_2[i,j,k] <sum(rcpst[0:3]): #re-dist3
amap_2[i,j,k]=token3
x=(i)/(2*L-2)
y=(j)/(2*L-2)
z=(k+1/2)/(L-1)
atom3p.append([x, y, z])
elif rmap_2[i,j,k] >= sum(rcpst[0:3]) and rmap_2[i,j,k] <sum(rcpst[0:4]): #re-dist4
amap_2[i,j,k]=token4
x=(i)/(2*L-2)
y=(j)/(2*L-2)
z=(k+1/2)/(L-1)
atom4p.append([x, y, z])
elif rmap_2[i,j,k] >= sum(rcpst[0:4]) and rmap_2[i,j,k] <= sum(rcpst[0:5]): #re-dist5
amap_2[i,j,k]=token5
x=(i)/(2*L-2)
y=(j)/(2*L-2)
z=(k+1/2)/(L-1)
atom5p.append([x, y, z])
# position saving
# k=1 -> z=0's plane and growing toward top
        # i,j,k distribution conditionally random, based on the randomness parameter
B=[]
if calentropy == 1:
count=0
for i in range(eventnumber):
for j in range(eventnumber):
for k in range(eventnumber):
for l in range(eventnumber):
B.append([[i, j], [k, l]]) # define state
count=count+1;
            # calculate the probability of each state
P=np.zeros(len(B))
amap_odd_e=amap_odd
amap_even_e=amap_even
amap_2_e=amap_2
amap_odd_e=np.concatenate((amap_odd_e, [amap_odd_e[0]]), axis=0)
a=np.split(amap_odd_e,[1],axis=1)
amap_odd_e=np.concatenate((amap_odd_e, a[0]), axis=1)
amap_even_e=np.concatenate((amap_even_e, [amap_even_e[0]]), axis=0)
b=np.split(amap_even_e,[1],axis=1)
amap_even_e=np.concatenate((amap_even_e, b[0]), axis=1)
amap_2_e=np.concatenate((amap_2_e, [amap_2_e[0]]), axis=0)
b=np.split(amap_2_e,[1],axis=1)
amap_2_e=np.concatenate((amap_2_e, b[0]), axis=1)
# odd
for i in range(L-1):
for j in range(L-1):
for k in range(L):
for kkk in range(len(B)):
posi=[[amap_odd_e[i,j,k], amap_odd_e[i+1,j,k]], [amap_odd_e[i,j+1,k], amap_odd_e[i+1,j+1,k]]]
#check=posi==B[kkk]
if posi==B[kkk]:
P[kkk]=P[kkk]+1
else:
P[kkk]=P[kkk]+0
for i in range(L-2):
for j in range(L-2):
for k in range(L):
for kkk in range(len(B)):
posi=[[amap_even_e[i,j,k], amap_even_e[i+1,j,k]], [amap_even_e[i,j+1,k], amap_even_e[i+1,j+1,k]]]
if posi==B[kkk]:
P[kkk]=P[kkk]+1
else:
P[kkk]=P[kkk]+0
# fcc layer 2
for i in range(2*L-2):
for j in range(2*L-2):
for k in range(L-1):
if amap_2[i,j,k]!=0:
for kkk in range(len(B)):
posi=[[amap_2_e[i,j,k], amap_2_e[i+1,j,k]], [amap_2_e[i,j+1,k], amap_2_e[i+1,j+1,k]]]
if posi==B[kkk]:
P[kkk]=P[kkk]+1
else:
P[kkk]=P[kkk]+0
P=P/sum(P) #/length(B)
entropy=0
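            # Shannon entropy (base 2) of the observed 2x2 neighbourhood-state distribution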
for i in range(len(B)):
if P[i]!=0:
entropy=entropy+ -P[i]*math.log(P[i], 2)
print(entropy)
an_eval=len(atom1p)+len(atom2p)+len(atom3p)+len(atom4p)+len(atom5p)
print(an_eval)
###output###
if vacancy==1:
fid = open(filename, 'w') # path
for i in range(len(atom1p)):
rndnum=random.random()
if rndnum > vacanporporreduce:
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom1, atom1p[i][0], atom1p[i][1], atom1p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom2p)):
rndnum=random.random()
if rndnum > vacanporporreduce:
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom2, atom2p[i][0], atom2p[i][1], atom2p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom3p)):
rndnum=random.random()
if rndnum > vacanporporreduce:
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom3, atom3p[i][0], atom3p[i][1], atom3p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom4p)):
rndnum=random.random()
if rndnum > vacanporporreduce:
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom4, atom4p[i][0], atom4p[i][1], atom4p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom5p)):
rndnum=random.random()
if rndnum > vacanporporreduce:
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom5, atom5p[i][0], atom5p[i][1], atom5p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
fid.close()
else:
fid = open(filename, 'w') # path
for i in range(len(atom1p)):
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom1, atom1p[i][0], atom1p[i][1], atom1p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom2p)):
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom2, atom2p[i][0], atom2p[i][1], atom2p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom3p)):
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom3, atom3p[i][0], atom3p[i][1], atom3p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom4p)):
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom4, atom4p[i][0], atom4p[i][1], atom4p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
for i in range(len(atom5p)):
fid.write('{:2s} {:8.6f} {:8.6f} {:8.6f}\n'.format(atom5, atom5p[i][0], atom5p[i][1], atom5p[i][2])) #按列輸出,若要按行輸出:fprintf(fid,'%.4\t',A(jj))
fid.close()
return entropy
``` |
{
"source": "A1996KASH/EcommerceWebSite",
"score": 3
} |
#### File: A1996KASH/EcommerceWebSite/getEmail.py
```python
from validate_email import validate_email
import sys
import dns.resolver
import itertools
import string
import random
from random import randint
firstName = sys.argv[1]
surName = sys.argv[2]
domainName= sys.argv[3]
def validDns(domainName):
records = dns.resolver.query(domainName, 'MX')
mxRecord = records[0].exchange
mxRecord = str(mxRecord)
if len(mxRecord) < 5 or "0.0.0.0" in mxRecord:
        print('\n Invalid Domain name')
return False
else:
return True
def randN(n):
    # return an n-digit number with distinct digits and a non-zero leading digit
    assert n <= 10
    l = list(range(10))  # compat py2 & py3
    while l[0] == 0:
        random.shuffle(l)
    return int(''.join(str(d) for d in l[:n]))
def possibleEmail(firstName,surName,domainName):
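    # candidate address formats: first-last, first_last, first.last, then firstlast plus a random 4-digit suffix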
listEmail = []
splChars = ['-',"_","."]
listEmail.append(str(firstName) + splChars[0] + str(surName) + "@" + str(domainName))
listEmail.append(str(firstName) + splChars[1] + str(surName) + "@" + str(domainName))
listEmail.append(str(firstName) + splChars[2] + str(surName) + "@" + str(domainName))
for i in range(0,10000):
listEmail.append(str(firstName) + str(surName) + str(randN(4)) + "@" + str(domainName))
return listEmail
if validDns(domainName) is True:
## Check for Email Address in the domain
mailIds = possibleEmail(firstName,surName,domainName)
i=0
retSts="Invalid"
while i < len(mailIds):
is_valid = validate_email(mailIds[i],verify=True)
if is_valid is True:
retSts = "Valid"
i = len(mailIds)
else:
i = i + 1
#return retSts
``` |
{
"source": "a1999370827/CoolQ-Sensoji-Temple-draw",
"score": 2
} |
#### File: CoolQ-Sensoji-Temple-draw/xyz.shinnshi.chouqian/CQGroupMemberListInfo.py
```python
import base64
from CQPack import CQUnpack
from CQGroupMemberInfo import CQGroupMemberInfo
def GetGroupMemberListInfo(data):
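    # `data` is a base64-encoded packed buffer: an int count followed by length-prefixed member records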
memList = []
    data = base64.decodebytes(data)  # decodestring was removed in Python 3.9; decodebytes is its replacement
info = CQUnpack(data)
count = info.GetInt()
    while count:
        if info.Len() <= 0:
            break
        retData = info.GetLenStr()
        memInfo = CQGroupMemberInfo(retData, False)
        memList.append(memInfo)
        count -= 1  # read exactly `count` member records
    return memList
'''
EXAMPLE:
from CQGroupMemberInfo import CQGroupMemberInfo
info = CQGroupMemberInfo(CQSDK.GetGroupMemberInfoV2(fromGroup, fromQQ))
'''
``` |
{
"source": "A1ANGithub/NBAComposer",
"score": 3
} |
#### File: A1ANGithub/NBAComposer/findStar.py
```python
import testcode.pointDict as pd
from collections import namedtuple
import re
import testcode.dbUtil as dbUtil
import testcode.dataAnalysis as da
"""Removed"""
ExtractedRecord = namedtuple('ExtractedRecord', ['text_pattern', 'player_score_dict'])
def get_point_dict():
return pd.point_dict
def find_player_from_name(player_list, name):
for player in player_list:
if name in (player.full_name, player.another_last_name, player.last_name):
return player.full_name
return None
def hide_all(record: str, team_name_list: list, player_name_list: list):
record = hide_team_name(record, team_name_list)
record = hide_location(record)
record = hide_player_name(record, player_name_list)
return record
def hide_team_name(record: str, team_name_list: list):
for name in team_name_list:
if name in record:
record = record.replace(name, '<team_name>')
return record
def hide_location(record: str):
record = re.sub(r'(\d+)英尺外', "<location>", record)
return record
def hide_player_name(record: str, name_list: list):
name_list = list(name_list)
name_list = sorted(name_list, key=len, reverse=True)
for name in name_list:
if name in record:
record = record.replace(name, '<player_name>')
return record
def extract_players(record, text_pattern):
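    # split the masked pattern on '<player_name>' and strip the literal pieces from the record,
    # leaving only the player names that filled the placeholders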
pieces = text_pattern.split('<player_name>')
# print(pieces)
record_pieces = []
for record_piece in pieces:
if record_piece != '':
record_pieces.append(record_piece)
for piece in record_pieces:
record = record.replace(piece, '+')
record_pieces = record.split('+')
players = []
for record_piece in record_pieces:
if record_piece != '':
players.append(record_piece)
return players
def extract_record(record: str, team_name_list, player_name_list, point_dict: dict):
text_pattern = hide_all(record, team_name_list, player_name_list)
players = extract_players(record, text_pattern)
points = point_dict.get(text_pattern)
if points is None:
point_score_dict = {player: 0 for player in players}
else:
point_score_dict = {player: point for player, point in zip(players, points)}
return point_score_dict
def get_player_name_list(player_list):
name_list = list()
for player in player_list:
name_list.append(player.full_name)
name_list.append(player.last_name)
name_list.append(player.another_last_name)
name_list.append('大' + player.another_last_name)
name_list.append('小' + player.another_last_name)
name_list += ['迈卡威', '麦卡杜', '慈世平', '德隆', '科比', '费弗斯', '恩瓦巴']
return name_list
def find_star(record_list: list, player_list: list, point_dict: dict, team_name_list) -> dict:
player_name_list = get_player_name_list(player_list)
player_dict = {player.full_name: 0 for player in player_list}
for record in record_list:
point_score_dict = extract_record(record, team_name_list, player_name_list, point_dict)
for player_name in point_score_dict.keys():
full_name = find_player_from_name(player_list, player_name)
if full_name is not None:
player_dict[full_name] += point_score_dict[player_name]
return player_dict
if __name__ == '__main__':
team_name_list = list(da.get_team_name_set(dbUtil.DEMO_MATCH_ID))
player_list = list(da.get_away_players(dbUtil.DEMO_MATCH_ID)) + list(da.get_home_players(dbUtil.DEMO_MATCH_ID))
origin_records = dbUtil.get_match_record(dbUtil.DEMO_MATCH_ID)
record_list = []
for origin_record in origin_records:
record_list.append(origin_record[4])
player_dict = find_star(record_list, player_list, get_point_dict(), team_name_list)
for player in da.get_away_players(dbUtil.DEMO_MATCH_ID):
print(player.full_name, " : ", player_dict[player.full_name])
``` |
{
"source": "a1b2c3d4e5x/py_gmail_sender",
"score": 2
} |
#### File: py_gmail_sender/core/send.py
```python
import sys
sys.path.append("..")
from const.color import output_error
from concurrent.futures import ThreadPoolExecutor
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import pandas as pd
import traceback
import requests
import random
import time
import sys
import os
import smtplib
import ssl
def write_to_file(log_file: str, text: str):
today = datetime.today()
time = today.strftime('%Y-%m-%d-%H:%M:%S')
log_df = pd.DataFrame([(text, time)], columns=['LOG', 'TIME'])
with open(log_file, mode = 'a') as f:
log_df.to_csv(f, header = f.tell() == 0, index = False)
output_error(text)
def to_log(text: str):
today = datetime.today()
date = today.strftime('%Y-%m-%d')
log_file = os.getcwd() + '/result/log-' + date + '.csv'
write_to_file(log_file, text)
def to_error(text: str):
today = datetime.today()
date = today.strftime('%Y-%m-%d')
log_file = os.getcwd() + '/result/error-' + date + '.csv'
write_to_file(log_file, text)
def some_exception(e):
    error_class = e.__class__.__name__  # exception type
    detail = e.args[0]  # error detail
    cl, exc, tb = sys.exc_info()  # current call stack
    lastCallStack = traceback.extract_tb(tb)[-1]  # last frame of the call stack
    fileName = lastCallStack[0]  # file name where the error occurred
    lineNum = lastCallStack[1]  # line number where the error occurred
    funcName = lastCallStack[2]  # function name where the error occurred
errMsg = "File \"{}\", line {}, in {}: [{}] {}".format(fileName, lineNum, funcName, error_class, detail)
to_error(errMsg)
class SendMail(object):
def from_csv(sender_csv: str, mail_format: str, receiver_csv: str):
        # create the result folder
result_folder = os.getcwd() + '/result'
if False == os.path.isdir(result_folder):
os.mkdir(result_folder)
try:
sender_df = pd.read_csv(sender_csv)
receiver_df = pd.read_csv(receiver_csv)
except Exception as e:
            print('Could not convert to a pandas DataFrame')
some_exception(e)
        mail_index: int = 0  # index of the sender account currently used to send mail
for index, row in receiver_df.iterrows():
sender_email: str = sender_df['EMAIL'][sender_df.index[mail_index]]
sender_password: str = sender_df['APPLICATION_PASSWORD'][sender_df.index[mail_index]]
receiver_name = row["NAME"]
receiver_email = row["EMAIL"]
context = ssl.create_default_context()
            with smtplib.SMTP_SSL(host='smtp.gmail.com', port=465, context=context) as smtp:  # reuse the ssl context created above
try:
print('sender_email: ' + sender_email + ' -> ' + receiver_email)
content = MIMEMultipart()
content["subject"] = 'Hi: ' + receiver_name
content["from"] = sender_email
content["to"] = receiver_email
content.attach(MIMEText("Demo python send email"))
                    smtp.ehlo()  # greet / verify the SMTP server
                    smtp.login(sender_email, sender_password)  # log in with the sender's gmail app password
print('1')
smtp.send_message(content)
print('2')
to_log('[Done] ' + receiver_email)
time.sleep(5)
except Exception as e:
                    print('Could not send the email')
some_exception(e)
#traceback.print_exc()
time.sleep(5)
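            # rotate to the next sender account (round-robin over the sender list)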
mail_index += 1
if mail_index >= len(sender_df):
mail_index = 0
return
return
# Google SMTP Server
def from_excel(sender_csv: str, mail_format: str, receiver_excel: str):
        print(sender_csv, mail_format, receiver_excel)
return
if False == isinstance(area_id, int):
area_id = 0
#fixed_url: str = 'https://www.iyp.com.tw/showroom.php?cate_name_eng_lv1=agriculture&cate_name_eng_lv3=agriculture-equip&a_id=4'
fixed_url: str = 'https://www.iyp.com.tw/showroom.php?cate_name_eng_lv1=' + main_category + '&cate_name_eng_lv3=' + sub_category + '&a_id=' + str(area_id)
page: int = 0
total_count: int = 0
total_email: int = 0
"""
# test parse content
# https://www.iyp.com.tw/082322152
content = Spider_ipy.spider_content('https://www.iyp.com.tw/082322152')
print(content)
return
"""
try:
        # create the result folder
result_folder = os.getcwd() + '/result'
if False == os.path.isdir(result_folder):
os.mkdir(result_folder)
main_cate_folder = result_folder + '/' + main_category
if False == os.path.isdir(main_cate_folder):
os.mkdir(main_cate_folder)
sub_cate_file = result_folder + '/' + main_category + '/' + sub_category + '.csv'
sub_cate_log = result_folder + '/' + main_category + '/__log__.csv'
except:
to_log('無法建立資料夾: ' + main_category + ', ' + sub_category)
traceback.print_exc()
return
while True:
try:
            # compose the url to crawl
target_url = fixed_url + '&p=' + str(page)
to_log('[TARGET] ' + target_url)
            # fake browser headers with a random user-agent
headers = {'user-agent': UserAgent().random}
            # fetch the page
pageRequest = requests.get(target_url, headers = headers)
pageRequest.encoding = pageRequest.apparent_encoding
except:
to_log('無法 request 列表: ' + target_url)
traceback.print_exc()
time.sleep((random.random() * 10) + 120)
continue;
try:
soup = BeautifulSoup(pageRequest.text, 'html.parser')
except:
to_log('無法轉成 html: ' + target_url)
to_log('細節: \n\t' + str(pageRequest).replace('\n', '\n\t'))
traceback.print_exc()
break;
store_data_array = []
try:
            # locate the search-result list block
res_block_list = soup.find(id = 'search-res')
            # VIP stores, premium stores, general stores
store_block_list = res_block_list.find_all('ol', class_ = ['recommend', 'diamond', 'general'], recursive = False)
if 0 == len(store_block_list):
break
for list_ol in store_block_list:
store_list = list_ol.find_all('li', recursive = False)
if 0 == len(store_list):
break
for list_li in store_list:
item_a = list_li.select_one('h3 a')
# 統一網站的 url 格式
store_name = item_a.text
store_name_url = item_a['href']
if '//ww' == store_name_url[:4]:
store_name_url = 'https:' + store_name_url
elif 'www.' == store_name_url[:4]:
store_name_url = 'https://' + store_name_url
store_data_array.append({'name': store_name, 'url': store_name_url})
except TypeError:
to_log('Parsing list url 失敗: ' + target_url)
traceback.print_exc()
break
total_count += len(store_data_array)
if 0 == len(store_data_array):
to_log('找不到任何資料: ' + target_url)
break
        # crawl each store's iyp detail page and collect the ones that expose an email
iyp_result = []
"""
with ThreadPoolExecutor(max_workers = 5) as executor:
time.sleep((random.random() + 0.5) * 2)
results = executor.map(Spider_ipy.spider_content, data['url'])
"""
for data in store_data_array:
if 'https://www.iyp.com.tw/' in data['url']:
print(' [FETCH] ' + data['name'], data['url'])
time.sleep((random.random() + 0.5) * 2)
store_content = Spider_ipy.spider_content(data['url'])
if None != store_content:
print('\033[36m [EMAIL] ' + store_content[0], '\033[0m')
iyp_result.append((data['name'], data['url'], store_content[0], store_content[1]))
else:
print(' [SKIP] ' + data['name'], data['url'])
        # save the entries where an email was found
if 0 != len(iyp_result):
total_email += len(iyp_result)
df = pd.DataFrame(iyp_result, columns=['NAME', 'URL', 'EMAIL', 'WEBSITE'])
with open(sub_cate_file, mode = 'a') as f:
df.to_csv(f, header = f.tell() == 0, index = False)
page += 1
time.sleep(3)
to_log(' [DONE] cate: ' + main_category + ', sub-cate: ' + sub_category + ', total: ' + str(total_count) + ', email: ' + str(total_email))
        # record the log for this run
fetch_log = [(str(total_count), str(total_email), sub_category, fixed_url)]
df_log = pd.DataFrame(fetch_log, columns=['TOTAL', 'EMAIL', 'SUB_CATEGORY', 'URL'])
with open(sub_cate_log, mode = 'a') as f:
df_log.to_csv(f, header = f.tell() == 0, index = False)
```
#### File: a1b2c3d4e5x/py_gmail_sender/main.py
```python
from const.color import color
from const.meta import meta
from core.send import SendMail
import sys
import os
class ArgvParser:
def __print_color_title(self, text: str):
print(color.BOLD + color.BLUE + text + color.END)
def __print_color_description(self, preText: str, postText: str):
print(' ' + color.BOLD + color.DARKCYAN + '--' + preText + ':' + color.END, postText)
    # read the recipient list from an excel / csv file and send the emails
def send(self, path: str):
sender_csv = '/resource/sender_info.csv'
mail_format = '/resource/mail_template.html'
full_sender_csv = os.getcwd() + sender_csv
full_mail_format = os.getcwd() + mail_format
full_receiver_path = os.getcwd() + '/' + path
if False == os.path.isfile(full_receiver_path):
print('Not exist file:', full_receiver_path)
return
if False == os.path.isfile(full_sender_csv):
print('Missing file:', full_sender_csv)
return
if False == os.path.isfile(full_mail_format):
print('Missing file:', full_mail_format)
return
file_ext = path.split('.')[-1]
if 'xlsx' == file_ext:
SendMail.from_excel(full_sender_csv, full_mail_format, full_receiver_path)
elif 'csv' == file_ext:
SendMail.from_csv(full_sender_csv, full_mail_format, full_receiver_path)
else:
print('Just support file format .excel and .csv')
return
    # description of this program
    def about(self):
        print('Sends a fixed-template email to a list of specified mailboxes')
    # build and developer information for this program
def info(self):
print('Build Date :', meta.BUILD_DATE)
print('Build Version :', 'v' + meta.BUILD_VERSION)
print('Developer Name :', meta.DEVERPER_NAME)
print('Developer Email:', meta.DEVERPER_EMAIL)
    # called when no arguments are given
    def none(self):
        self.__print_color_title('Command reference')
        self.__print_color_description(self.send.__name__ + ' [*receiver csv]', 'send the emails')
        self.__print_color_description(self.about.__name__, 'about this program')
        self.__print_color_description(self.info.__name__, 'build information of this program')
# check whether the given argv matches a command
def __argv_is_cmd(fn_name: str) -> bool:
if 2 <= len(sys.argv):
return ('--' + fn_name) == sys.argv[1]
return True
# parse the argv arguments
def __parse_argv():
parser = ArgvParser()
if 2 == len(sys.argv):
if __argv_is_cmd(parser.about.__name__):
return parser.about()
elif __argv_is_cmd(parser.info.__name__):
return parser.info()
if 3 == len(sys.argv):
if __argv_is_cmd(parser.send.__name__):
return parser.send(sys.argv[2])
return parser.none()
# program entry point
if __name__ == '__main__':
__parse_argv()
``` |
{
"source": "a1b2c3d4e5x/spider_iyp",
"score": 4
} |
#### File: const/sub_categories/base_category.py
```python
from typing import List
class BaseCategory(object):
def list(self) -> List[str]:
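        # collect the names of every category method defined on the subclass,
        # skipping the metadata attributes (category_name, category_id) and list() itself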
all = []
method_list = [method for method in dir(self) if method.startswith('_') is False]
for item in method_list:
if 'category_name' == item:
continue
elif 'category_id' == item:
continue
elif BaseCategory.list.__name__ == item:
continue
all.append(item)
return all
```
#### File: sub_categories/culture/education.py
```python
from typing import Dict
from ..base_category import BaseCategory
# 學術研究機構 academic-institutes
class AcademicInstitutes(object):
def academic_institutes() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '學術研究機構'
list['id'] = 'academic-institutes'
academic_institutes = {}
academic_institutes['research-institutions'] = '學術研究機構'
list['sub'] = academic_institutes
return list
# 學校 schools
class Schools(object):
def schools() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '學校'
list['id'] = 'schools'
schools = {}
schools['supplementary-correspondence-education'] = '補校、函授'
schools['colleges-universities'] = '大專院校'
schools['primary-schools'] = '小學'
schools['high-schools'] = '中學'
schools['kindergartens'] = '幼兒園'
list['sub'] = schools
return list
# 補習教育 remedial-education
class RemedialEducation(object):
def remedial_education() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '補習教育'
list['id'] = 'remedial-education'
remedial_education = {}
remedial_education['languages'] = '語文補習班'
remedial_education['dancing-yoga'] = '舞蹈、瑜伽'
remedial_education['computers'] = '電腦補習班'
remedial_education['specialties'] = '專業技能'
remedial_education['studying-tutorials'] = '升學補習班'
remedial_education['training-centers'] = '補習班'
remedial_education['talent-learning'] = '才藝補習班'
list['sub'] = remedial_education
return list
# 訓練 training
class Training(object):
def training() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '訓練'
list['id'] = 'training'
training = {}
training['training-courses'] = '訓練課程'
list['sub'] = training
return list
# 安親班 daycare-center
class DaycareCenter(object):
def daycare_center() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '安親班'
list['id'] = 'daycare-center'
daycare_center = {}
daycare_center['after-school-care'] = '課輔安親'
list['sub'] = daycare_center
return list
# 駕訓班 driving-training-course
class DrivingTrainingCourse(object):
def driving_training_course() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '駕訓班'
list['id'] = 'driving-training-course'
driving_training_course = {}
driving_training_course['driving-schools'] = '駕訓班'
list['sub'] = driving_training_course
return list
# 留學服務 study-service
class StudyService(object):
def study_service() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '留學服務'
list['id'] = 'study-service'
study_service = {}
study_service['international-education-services'] = '留學服務'
list['sub'] = study_service
return list
# 圖書館及設備 library-and-equipment
class LibraryAndEquipment(object):
def library_and_equipment() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '圖書館及設備'
list['id'] = 'library-and-equipment'
library_and_equipment = {}
library_and_equipment['libraries'] = '圖書館'
library_and_equipment['library-equipments'] = '圖書設備及用品'
list['sub'] = library_and_equipment
return list
# K書中心 reading-center
class ReadingCenter(object):
def reading_center() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = 'K書中心'
list['id'] = 'reading-center'
reading_center = {}
reading_center['studying-centers'] = 'K書中心'
list['sub'] = reading_center
return list
# 藝文、科學中心 art-science-center
class ArtScienceCenter(object):
def art_science_center() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '藝文、科學中心'
list['id'] = 'art-science-center'
art_science_center = {}
art_science_center['museums'] = '美術館、博物館'
art_science_center['educational-cultural-centers'] = '社教、文化中心'
art_science_center['astronomy-science-museum'] = '天文、科學館'
list['sub'] = art_science_center
return list
# 育才學術 education
class Education(BaseCategory, AcademicInstitutes, Schools, RemedialEducation, Training, DaycareCenter,
DrivingTrainingCourse, StudyService, LibraryAndEquipment, ReadingCenter, ArtScienceCenter):
category_name = '育才學術'
category_id = 'education'
```
#### File: sub_categories/culture/leisure.py
```python
from typing import Dict
from ..base_category import BaseCategory
# 旅行 travel-agency
class TravelAgency(object):
def travel_agency() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '旅行'
list['id'] = 'travel-agency'
travel_agency = {}
travel_agency['travel-agents'] = '旅行社'
travel_agency['travel-supplies'] = '旅行用品'
list['sub'] = travel_agency
return list
# 飯店旅館 hotel-service
class HotelService(object):
def hotel_service() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '飯店旅館'
list['id'] = 'hotel-service'
hotel_service = {}
hotel_service['bed-and-breakfast'] = '民宿'
hotel_service['Motels'] = '汽車旅館'
hotel_service['Hotels'] = '飯店、旅館'
hotel_service['hotel-supplies'] = '飯店備品'
list['sub'] = hotel_service
return list
# 畫及畫廊 art-galleries
class ArtGalleries(object):
def art_galleries() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '畫及畫廊'
list['id'] = 'art-galleries'
art_galleries = {}
art_galleries['art-galleries'] = '畫廊、藝廊'
art_galleries['picture-framing'] = '裱框'
list['sub'] = art_galleries
return list
# 嗜好 hobby
class Hobby(object):
def hobby() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '嗜好'
list['id'] = 'hobby'
hobby = {}
hobby['music-class'] = '音樂教室'
hobby['stamps-coins'] = '郵票、古幣'
hobby['chess-clubs'] = '棋社'
hobby['antiques'] = '古玩藝品'
list['sub'] = hobby
return list
# 音樂 music
class Music(object):
def music() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '音樂'
list['id'] = 'music'
music = {}
music['choir-orchestra'] = '合唱團、樂團'
music['pianos'] = '鋼琴'
music['musical-instruments'] = '樂器'
list['sub'] = music
return list
# 電影製片及發行 film-production
class FilmProduction(object):
def film_production() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '電影製片及發行'
list['id'] = 'film-production'
film_production = {}
film_production['movie-equipment'] = '電影器材'
film_production['movie-production'] = '影片製片'
list['sub'] = film_production
return list
# 戲劇 drama
class Drama(object):
def drama() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '戲劇'
list['id'] = 'drama'
drama = {}
drama['Song-and-Dance-Troupe'] = '康樂隊、歌舞團'
drama['program-planning'] = '節目企劃'
drama['performance-groups'] = '舞團、劇團'
drama['folk-art-performances'] = '民俗藝術表演'
drama['drama-production'] = '戲劇製作'
list['sub'] = drama
return list
# 寵物 pet
class Pet(object):
def pet() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '寵物'
list['id'] = 'pet'
pet = {}
pet['pet-training'] = '寵物訓練'
pet['pet-shops'] = '寵物店'
pet['aquarium-supplies'] = '水族館'
pet['bird-farms'] = '鳥園'
list['sub'] = pet
return list
# 體育團體及協會 sports-bodies
class SportsBodies(object):
def sports_bodies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '體育團體及協會'
list['id'] = 'sports-bodies'
sports_bodies = {}
sports_bodies['sport-associations'] = '體育協會、運動團體'
list['sub'] = sports_bodies
return list
# 國術功夫 martial-arts
class MartialArts(object):
def martial_arts() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '國術功夫'
list['id'] = 'martial-arts'
martial_arts = {}
martial_arts['martial-arts'] = '武術道館'
martial_arts['traditional-chinese-boxing'] = '國術館'
list['sub'] = martial_arts
return list
# 娛樂用品 entertainment-supplies
class EntertainmentSupplies(object):
def entertainment_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '娛樂用品'
list['id'] = 'entertainment-supplies'
entertainment_supplies = {}
entertainment_supplies['games'] = '遊戲用品'
entertainment_supplies['mahjong'] = '麻將'
entertainment_supplies['playing-cards'] = '撲克牌、紙牌'
list['sub'] = entertainment_supplies
return list
# 休閒用品 leisure-supplies
class LeisureSupplies(object):
def leisure_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '休閒用品'
list['id'] = 'leisure-supplies'
leisure_supplies = {}
leisure_supplies['leisure-supplies'] = '休閒用品'
leisure_supplies['mountain-gears'] = '登山用品'
leisure_supplies['diving-equipment'] = '潛水器材'
leisure_supplies['fishing-gears'] = '漁具用品'
leisure_supplies['horse-riding'] = '馬術'
leisure_supplies['camping-gears'] = '露營用品'
list['sub'] = leisure_supplies
return list
# 運動器材及用品 sports-supplies
class SportsSupplies(object):
def sports_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '運動器材及用品'
list['id'] = 'sports-supplies'
sports_supplies = {}
sports_supplies['bowling-alley'] = '保齡球場'
sports_supplies['Balls'] = '球類器材'
sports_supplies['fitness-equip'] = '健身運動器材'
sports_supplies['golf'] = '高爾夫球'
sports_supplies['water-sports'] = '水上運動'
list['sub'] = sports_supplies
return list
# 運動場所 sports-venues
class SportsVenues(object):
def sports_venues() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '運動場所'
list['id'] = 'sports-venues'
sports_venues = {}
sports_venues['tennis-courts'] = '網球場'
sports_venues['baseball-fields'] = '棒球場'
sports_venues['stadiums'] = '體育場'
sports_venues['horse-riding-fields'] = '騎馬場'
sports_venues['skating-rink'] = '溜冰場'
sports_venues['swimming-pools'] = '游泳池'
sports_venues['golf-course'] = '高爾夫球場'
sports_venues['fitness-centers'] = '健身中心'
sports_venues['badminton-courts'] = '羽球館'
list['sub'] = sports_venues
return list
# 電影院 cinema
class Cinema(object):
def cinema() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '電影院'
list['id'] = 'cinema'
cinema = {}
cinema['theaters'] = '電影院'
list['sub'] = cinema
return list
# 娛樂場所 entertainment
class Entertainment(object):
def entertainment() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '娛樂場所'
list['id'] = 'entertainment'
entertainment = {}
entertainment['resorts'] = '渡假休閒中心'
entertainment['whale-watching-boating'] = '賞鯨、泛舟'
entertainment['pubs'] = '夜店'
entertainment['amusement-places'] = '遊樂場'
entertainment['KTV'] = 'KTV'
entertainment['fishing-shrimp-playground'] = '釣魚、釣蝦場'
entertainment['entertainment-business'] = '娛樂事業'
entertainment['dance-halls-karaoke-bars'] = '舞廳、歌廳'
entertainment['clubs'] = '俱樂部、夜總會'
entertainment['SPA-hot-springs'] = '泡湯'
list['sub'] = entertainment
return list
# 景點名勝 sightseeing
class Sightseeing(object):
def sightseeing() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '景點名勝'
list['id'] = 'sightseeing'
sightseeing = {}
sightseeing['tourist-farms'] = '觀光農場'
sightseeing['scenery-stops'] = '旅遊景點'
list['sub'] = sightseeing
return list
# 彩券 lottery
class Lottery(object):
def lottery() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '彩券'
list['id'] = 'lottery'
lottery = {}
lottery['lottery-shops'] = '彩券行'
list['sub'] = lottery
return list
# 樂在休閒 leisure
class Leisure(BaseCategory, TravelAgency, HotelService, ArtGalleries, Hobby, Music, FilmProduction,
Drama, Pet, SportsBodies, MartialArts, EntertainmentSupplies, LeisureSupplies, SportsSupplies,
SportsVenues, Cinema, Entertainment, Sightseeing, Lottery):
category_name = '樂在休閒'
category_id = 'leisure'
```
#### File: sub_categories/industry/chemical_industry.py
```python
from typing import Dict
from ..base_category import BaseCategory
# 化工及石化原料 petrochemical-materials
class PetrochemicalMaterials(object):
def petrochemical_materials() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '化工及石化原料'
list['id'] = 'petrochemical-materials'
petrochemical_materials = {}
petrochemical_materials['man-made-fiber-materials'] = '人造纖維原料'
petrochemical_materials['chemical-materials'] = '化工原料'
petrochemical_materials['glass-fiber-materials'] = '玻璃纖維原料'
petrochemical_materials['petrochemical-materials'] = '石化原料'
petrochemical_materials['starch'] = '澱粉'
petrochemical_materials['chemical-products'] = '化學'
petrochemical_materials['plastic-waste'] = '塑膠廢料'
list['sub'] = petrochemical_materials
return list
# 石化產品 petrochemicals
class Petrochemicals(object):
def petrochemicals() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '石化產品'
list['id'] = 'petrochemicals'
petrochemicals = {}
petrochemicals['synthetic-leather'] = '合成皮'
petrochemicals['foams-sponges'] = '海綿、泡綿'
petrochemicals['glass-fiber-products'] = '玻璃纖維製品'
petrochemicals['acrylic-products'] = '壓克力'
petrochemicals['wax'] = '蠟'
list['sub'] = petrochemicals
return list
# 油品 oil
class Oil(object):
def oil() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '油品'
list['id'] = 'oil'
oil = {}
oil['petroleum'] = '油'
oil['transportation-equip'] = '油品儲運設備'
list['sub'] = oil
return list
# 瓦斯業 gas-service
class GasService(object):
def gas_service() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '瓦斯業'
list['id'] = 'gas-service'
gas_service = {}
gas_service['gas'] = '瓦斯業'
list['sub'] = gas_service
return list
# 中間體 intermediate
class Intermediate(object):
def intermediate() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '中間體'
list['id'] = 'intermediate'
intermediate = {}
intermediate['aromatizer'] = '香精、香料'
intermediate['intermediate'] = '中間體'
intermediate['abrasive'] = '磨料'
intermediate['camphor'] = '樟腦'
list['sub'] = intermediate
return list
# 玻璃 glass
class Glass(object):
def glass() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '玻璃'
list['id'] = 'glass'
glass = {}
glass['glass-manufacturers'] = '玻璃製造'
glass['glass'] = '玻璃'
list['sub'] = glass
return list
# 氣體 gas
class Gas(object):
def gas() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '氣體'
list['id'] = 'gas'
gas = {}
gas['gas'] = '氣體'
list['sub'] = gas
return list
# 農藥品 pesticides
class Pesticides(object):
def pesticides() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '農藥品'
list['id'] = 'pesticides'
pesticides = {}
pesticides['pesticide'] = '農藥'
list['sub'] = pesticides
return list
# 消毒殺蟲藥劑 insecticide
class Insecticide(object):
def insecticide() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '消毒殺蟲藥劑'
list['id'] = 'insecticide'
insecticide = {}
insecticide['insecticide'] = '殺蟲劑'
insecticide['disinfectant-cleaner'] = '消毒清潔劑'
list['sub'] = insecticide
return list
# 紙品 paper
class Paper(object):
def paper() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '紙品'
list['id'] = 'paper'
paper = {}
paper['papermaking-pulp'] = '造紙、紙漿'
paper['paper-processing'] = '紙品加工'
paper['papers'] = '紙'
paper['industrial-paper'] = '工業用紙'
paper['packaging-papers'] = '包裝紙'
paper['cultural-papers'] = '文化用紙'
paper['household-papers'] = '家庭用紙'
list['sub'] = paper
return list
# 塑膠橡膠 plastics-and-rubber
class PlasticsAndRubber(object):
def plastics_and_rubber() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '塑膠橡膠'
list['id'] = 'plastics-and-rubber'
plastics_and_rubber = {}
plastics_and_rubber['plastic-material'] = '塑膠原料'
plastics_and_rubber['plastic-additives-color-agent'] = '塑膠添加劑'
plastics_and_rubber['plastic-products'] = '塑膠製品'
plastics_and_rubber['plastic-processing'] = '塑膠模型'
plastics_and_rubber['rubber-materials'] = '橡膠原料'
plastics_and_rubber['rubber-additive'] = '橡膠添加劑'
plastics_and_rubber['rubber-products'] = '橡膠製品'
list['sub'] = plastics_and_rubber
return list
# 塗料染料 paint-and-dye
class PaintAndDye(object):
def paint_and_dye() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '塗料染料'
list['id'] = 'paint-and-dye'
paint_and_dye = {}
paint_and_dye['paint-coating'] = '油漆、塗料'
paint_and_dye['pigment-dyestuff'] = '顏料、色料、染料'
paint_and_dye['coating-service'] = '塗膠服務'
list['sub'] = paint_and_dye
return list
# 溶劑黏劑 solvent-and-adhesives
class SolventAndAdhesives(object):
def solvent_and_adhesives() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '溶劑黏劑'
list['id'] = 'solvent-and-adhesives'
solvent_and_adhesives = {}
solvent_and_adhesives['solvent'] = '溶劑'
solvent_and_adhesives['adhesive'] = '樹脂'
list['sub'] = solvent_and_adhesives
return list
# 能源設備及產品 energy-products
class EnergyProducts(object):
def energy_products() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '能源設備及產品'
list['id'] = 'energy-products'
energy_products = {}
energy_products['energy-equip'] = '能源設備'
list['sub'] = energy_products
return list
# 化學工業 chemical-industry
class ChemicalIndustry(BaseCategory, PetrochemicalMaterials, Petrochemicals, Oil, GasService, Intermediate,
Glass, Gas, Pesticides, Insecticide, Paper, PlasticsAndRubber, PaintAndDye, SolventAndAdhesives, EnergyProducts):
category_name = '化學工業'
category_id = 'chemical-industry'
```
#### File: sub_categories/industry/metal_industry.py
```python
from typing import Dict
from ..base_category import BaseCategory
# 金屬基本工業 basic-metal-industries
class BasicMetalIndustries(object):
def basic_metal_industries() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '金屬基本工業'
list['id'] = 'basic-metal-industries'
basic_metal_industries = {}
basic_metal_industries['stainless-steel-materials'] = '不銹鋼材料'
basic_metal_industries['stainless-steel-products'] = '不銹鋼製品'
basic_metal_industries['angle-steel'] = '角鋼'
basic_metal_industries['non-ferrous-metal-products'] = '非鐵金屬'
basic_metal_industries['copper-products'] = '銅及銅製品'
basic_metal_industries['aluminum-products'] = '鋁及鋁製品'
basic_metal_industries['steel'] = '鋼鐵'
basic_metal_industries['tin-products'] = '錫及錫製品'
list['sub'] = basic_metal_industries
return list
# 金屬工具 metal-tool
class MetalTool(object):
def metal_tool() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '金屬工具'
list['id'] = 'metal-tool'
metal_tool = {}
metal_tool['cutlery'] = '刀、剪刀'
metal_tool['tools'] = '工具'
metal_tool['cutting-tools'] = '切削工具'
metal_tool['hand-tools'] = '手工具'
metal_tool['air-tools'] = '氣動工具'
metal_tool['grinding-tools'] = '研磨工具'
metal_tool['power-tools'] = '電動工具'
metal_tool['trolleys'] = '手推車'
list['sub'] = metal_tool
return list
# 金屬模具 metal-mold
class MetalMold(object):
def metal_mold() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '金屬模具'
list['id'] = 'metal-mold'
metal_mold = {}
metal_mold['moldings'] = '模具'
metal_mold['press'] = '沖壓模具'
metal_mold['plastic'] = '塑膠模具'
metal_mold['die-casting'] = '壓鑄模具'
metal_mold['forging'] = '鍛造模具'
metal_mold['casting'] = '鑄造模具'
list['sub'] = metal_mold
return list
# 五金製品 hardware
class Hardware(object):
def hardware() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '五金製品'
list['id'] = 'hardware'
hardware = {}
hardware['valves'] = '凡而、考克'
hardware['tinplate'] = '馬口鐵'
hardware['marine-hardware'] = '船舶五金'
hardware['locks'] = '鎖類五金'
hardware['steel-products'] = '鋼'
hardware['hardware-manufacturers'] = '五金製造'
hardware['hardware'] = '五金製品'
list['sub'] = hardware
return list
# 金屬冶煉鍛鑄 malleable-metal-smelting
class MalleableMetalSmelting(object):
def malleable_metal_smelting() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '金屬冶煉鍛鑄'
list['id'] = 'malleable-metal-smelting'
malleable_metal_smelting = {}
malleable_metal_smelting['metal-smelting'] = '金屬冶煉'
malleable_metal_smelting['extrusion-rolling-pressure'] = '金屬擠壓、軋壓'
malleable_metal_smelting['metal-cutting'] = '金屬切割'
malleable_metal_smelting['powder-metallurgy'] = '粉末冶金'
malleable_metal_smelting['alloy'] = '合金'
malleable_metal_smelting['die-casting'] = '壓鑄'
malleable_metal_smelting['forging'] = '鍛造'
malleable_metal_smelting['casting'] = '鑄造'
malleable_metal_smelting['ironware-casting'] = '鐵器、鑄件'
list['sub'] = malleable_metal_smelting
return list
# 金屬表面處理 metal-surface-treatment
class MetalSurfaceTreatment(object):
def metal_surface_treatment() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '金屬表面處理'
list['id'] = 'metal-surface-treatment'
metal_surface_treatment = {}
metal_surface_treatment['heat-treatment'] = '熱處理'
metal_surface_treatment['plating'] = '電鍍'
metal_surface_treatment['welding'] = '電焊'
metal_surface_treatment['painting-spraying'] = '塗裝、噴漆'
metal_surface_treatment['surface-treatment'] = '表面處理'
list['sub'] = metal_surface_treatment
return list
# 廢五金 scrap
class Scrap(object):
def scrap() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '廢五金'
list['id'] = 'scrap'
scrap = {}
scrap['scrap'] = '五金廢料'
list['sub'] = scrap
return list
# 金屬工業 metal-industry
class MetalIndustry(BaseCategory, BasicMetalIndustries, MetalTool, MetalMold, Hardware, MalleableMetalSmelting,
MetalSurfaceTreatment, Scrap):
category_name = '金屬工業'
category_id = 'metal-industry'
```
#### File: sub_categories/life/housing.py
```python
from typing import Dict
from ..base_category import BaseCategory
# 營建工程 construction-engineering
class ConstructionEngineering(object):
def construction_engineering() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '營建工程'
list['id'] = 'construction-engineering'
construction_engineering = {}
construction_engineering['removal'] = '拆遷工程'
construction_engineering['building-cleaning'] = '建築物清洗'
construction_engineering['road-works'] = '道路工程'
construction_engineering['clean-rooms'] = '無塵室工程'
construction_engineering['swimming-pools'] = '游泳池承造'
construction_engineering['landscape'] = '景觀工程'
construction_engineering['civil-contractors'] = '土木承包商'
construction_engineering['scaffolding-works'] = '鷹架工程'
construction_engineering['steel-works'] = '鋼構工程'
construction_engineering['construction-companies'] = '營造廠'
construction_engineering['drilling'] = '鑽探工程'
construction_engineering['architectures'] = '建築師'
construction_engineering['construction'] = '營建工程'
construction_engineering['polishing-equip-supplies'] = '拋光設備用品'
list['sub'] = construction_engineering
return list
# 建築材料 building-materials
class BuildingMaterials(object):
def building_materials() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '建築材料'
list['id'] = 'building-materials'
building_materials = {}
building_materials['fire-retardant-materials'] = '耐火建材'
building_materials['lumber-wallboards'] = '建築木材、隔板'
building_materials['curtain-walls'] = '帷幕牆'
building_materials['asphalt'] = '瀝青、柏油'
building_materials['building-materials'] = '建築材料'
building_materials['doors-and-windows'] = '門窗'
building_materials['automatic-doors'] = '玻璃自動門'
building_materials['electric-shutters'] = '電動捲門'
building_materials['aluminum-windows-and-doors'] = '鋁門窗'
building_materials['cement'] = '水泥'
building_materials['concrete'] = '混凝土'
building_materials['gravel'] = '砂石'
building_materials['brick-tile-stone'] = '地磚石材'
building_materials['flooring-materials'] = '地板材料'
building_materials['ceiling'] = '天花板'
building_materials['tiles'] = '磁磚'
building_materials['marble'] = '大理石'
list['sub'] = building_materials
return list
# 建築模型及繪圖 building-model
class BuildingModel(object):
def building_model() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '建築模型及繪圖'
list['id'] = 'building-model'
building_model = {}
building_model['building-models-drawing'] = '建築模型與繪圖'
list['sub'] = building_model
return list
# 裝潢工程 decoration-works
class DecorationWorks(object):
def decoration_works() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '裝潢工程'
list['id'] = 'decoration-works'
decoration_works = {}
decoration_works['awnings-security-windows'] = '雨棚鐵窗工程'
decoration_works['decorations'] = '裝潢工程'
decoration_works['interior-designs'] = '室內設計'
decoration_works['painting-service'] = '油漆工程'
decoration_works['flooring'] = '地板工程'
decoration_works['wall-papers'] = '壁紙'
decoration_works['glass-installation'] = '玻璃安裝'
decoration_works['tatami-paper-doors'] = '疊蓆、紙門'
decoration_works['woodworking'] = '木工'
list['sub'] = decoration_works
return list
# 水電空調工程 air-conditioning-engineering
class AirConditioningEngineering(object):
def air_conditioning_engineering() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '水電空調工程'
list['id'] = 'air-conditioning-engineering'
air_conditioning_engineering = {}
air_conditioning_engineering['plumbing-and-electrical-engineering'] = '水電工程'
air_conditioning_engineering['plumbing-and-electrical-materials'] = '水電器材'
air_conditioning_engineering['air-condition'] = '空調工程'
air_conditioning_engineering['water-tanks'] = '水塔'
list['sub'] = air_conditioning_engineering
return list
# 工程檢測 engineering-testing
class EngineeringTesting(object):
def engineering_testing() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '工程檢測'
list['id'] = 'engineering-testing'
engineering_testing = {}
engineering_testing['building-safety'] = '建築安全檢查'
engineering_testing['concrete-test'] = '混凝土試驗'
engineering_testing['construction-inspection'] = '工程檢測'
list['sub'] = engineering_testing
return list
# 防水理水工程 waterproof-engineering
class WaterproofEngineering(object):
def waterproof_engineering() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '防水理水工程'
list['id'] = 'waterproof-engineering'
waterproof_engineering = {}
waterproof_engineering['plumbers'] = '防水抓漏工程'
waterproof_engineering['water-treatments'] = '水處理'
list['sub'] = waterproof_engineering
return list
# 衛浴設備及用品 bathroom-supplies
class BathroomSupplies(object):
def bathroom_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '衛浴設備及用品'
list['id'] = 'bathroom-supplies'
bathroom_supplies = {}
bathroom_supplies['bathroom-fixtures'] = '衛浴設備'
bathroom_supplies['water-heaters'] = '熱水器'
bathroom_supplies['solar-power-water-heaters'] = '太陽能熱水器'
bathroom_supplies['sauna-equipment'] = '三溫暖設備'
list['sub'] = bathroom_supplies
return list
# 廚具爐具 kitchen-appliances
class KitchenAppliances(object):
def kitchen_appliances() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '廚具爐具'
list['id'] = 'kitchen-appliances'
kitchen_appliances = {}
kitchen_appliances['cookware'] = '烹飪用具'
kitchen_appliances['range-hood'] = '抽油煙機'
kitchen_appliances['gas-supplies'] = '瓦斯煤氣行'
kitchen_appliances['gas-stoves'] = '瓦斯爐'
kitchen_appliances['kitchen-equip'] = '廚具流理台設備'
list['sub'] = kitchen_appliances
return list
# 電梯升降設備 lifting-equipment
class LiftingEquipment(object):
def lifting_equipment() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '電梯升降設備'
list['id'] = 'lifting-equipment'
lifting_equipment = {}
lifting_equipment['elevators-lifts'] = '電梯、升降機'
list['sub'] = lifting_equipment
return list
# 公寓大廈管理 building-management
class BuildingManagement(object):
def building_management() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '公寓大廈管理'
list['id'] = 'building-management'
building_management = {}
building_management['apartment-management'] = '公寓大廈管理'
list['sub'] = building_management
return list
# Security and fire protection: home-security
class HomeSecurity(object):
def home_security() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '保全消防'
list['id'] = 'home-security'
home_security = {}
home_security['fire-equipment'] = '消防設備'
home_security['security-equipment'] = '安全設備用品'
home_security['security-equip-alarm-system'] = '保全監視系統'
list['sub'] = home_security
return list
# Furniture stores: furniture
class Furniture(object):
def furniture() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '家具賣場'
list['id'] = 'furniture'
furniture = {}
furniture['furniture-stores'] = '家具賣場'
list['sub'] = furniture
return list
# Furniture and lighting: furniture-and-light
class FurnitureAndLight(object):
def furniture_and_light() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '家具燈飾'
list['id'] = 'furniture-and-light'
furniture_and_light = {}
furniture_and_light['lightings'] = '燈飾'
furniture_and_light['outdoor-furniture'] = '戶外休閒家具'
furniture_and_light['furniture-repairs'] = '家具維修'
furniture_and_light['antique-furniture'] = '古董家具'
furniture_and_light['secondhand-furniture'] = '中古家具'
furniture_and_light['furniture-designs'] = '家具設計'
furniture_and_light['kids-furniture'] = '兒童家具'
furniture_and_light['bamboo-rattan-furniture'] = '竹、籐家具'
furniture_and_light['metal-furniture'] = '金屬家具'
furniture_and_light['furniture'] = '家具'
list['sub'] = furniture_and_light
return list
# Home decor and bedding: bedding-and-mattress
class BeddingAndMattress(object):
def bedding_and_mattress() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '家飾寢具'
list['id'] = 'bedding-and-mattress'
bedding_and_mattress = {}
bedding_and_mattress['table-clothes'] = '桌巾、餐墊'
bedding_and_mattress['bedding-blankets'] = '寢具、被毯'
bedding_and_mattress['beds'] = '床'
bedding_and_mattress['curtains-blinds'] = '窗簾、百葉窗'
bedding_and_mattress['carpets'] = '地毯'
list['sub'] = bedding_and_mattress
return list
# Horticulture: horticultural
class Horticultural(object):
def horticultural() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '園藝'
list['id'] = 'horticultural'
horticultural = {}
horticultural['florists'] = '花店'
#horticultural['landscape'] = '園藝設計工程'
horticultural['garden-nursery'] = '花圃、苗圃'
horticultural['garden-supplies'] = '園藝器具資材'
list['sub'] = horticultural
return list
# Housing and home: housing
class Housing(BaseCategory,
ConstructionEngineering, BuildingMaterials, BuildingModel, DecorationWorks,
AirConditioningEngineering, EngineeringTesting, WaterproofEngineering, BathroomSupplies, KitchenAppliances,
LiftingEquipment, BuildingManagement, HomeSecurity, Furniture, FurnitureAndLight, BeddingAndMattress,
Horticultural
):
category_name = '住屋居家'
category_id = 'housing'
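# Illustrative sketch (not part of the original file): each category class above
# exposes a builder method defined without `self`, so it can be called directly
# on the class. The printed values come straight from the dicts defined above.
if __name__ == '__main__':
    decoration = DecorationWorks.decoration_works()
    print(decoration['name'], decoration['id'])
    print(sorted(decoration['sub'].keys()))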
```
#### File: sub_categories/service/media.py
```python
from typing import Dict
from ..base_category import BaseCategory
# CDs and tapes: cd-and-tape
class CdAndTape(object):
def cd_and_tape() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = 'CD錄音帶'
list['id'] = 'cd-and-tape'
cd_and_tape = {}
cd_and_tape['CD-tapes-manufacturers'] = 'CD片、錄音帶製造'
cd_and_tape['music-shops'] = '唱片行'
list['sub'] = cd_and_tape
return list
# Video tapes and discs: dvd-and-vcd
class DvdAndVcd(object):
def dvd_and_vcd() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '影帶影碟光碟'
list['id'] = 'dvd-and-vcd'
dvd_and_vcd = {}
dvd_and_vcd['video-tapes-DVD-manufacturers'] = '錄影帶、DVD製造'
dvd_and_vcd['video-tapes-rental'] = '錄影帶出租'
dvd_and_vcd['video-tapes-DVD'] = '錄影帶、DVD'
list['sub'] = dvd_and_vcd
return list
# Audio and video recording: recording-video
class RecordingVideo(object):
def recording_video() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '錄音錄影'
list['id'] = 'recording-video'
recording_video = {}
recording_video['production-service'] = '視聽製作服務'
recording_video['recording-equip-manufacturers'] = '影音設備製造'
list['sub'] = recording_video
return list
# Glasses: glasses
class Glasses(object):
def glasses() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '眼鏡'
list['id'] = 'glasses'
glasses = {}
glasses['glasses-contact-lenses'] = '眼鏡、隱形眼鏡'
glasses['glasses-manufacturers'] = '眼鏡製造'
list['sub'] = glasses
return list
# Watches and clocks: watches-and-clocks
class WatchesAndClocks(object):
def watches_and_clocks() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '鐘錶'
list['id'] = 'watches-and-clocks'
watches_and_clocks = {}
watches_and_clocks['watch-manufacturers'] = '鐘錶製造'
watches_and_clocks['watches'] = '鐘錶'
list['sub'] = watches_and_clocks
return list
# Photography: photography
class Photography(object):
def photography() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '攝影'
list['id'] = 'photography'
photography = {}
photography['image-equip-materials'] = '影像設備材料'
photography['photo-processing'] = '相片沖洗'
photography['camera-supplies'] = '相機、攝影器材製造'
photography['camera'] = '相機、攝影機'
photography['photo-service'] = '攝影服務'
list['sub'] = photography
return list
# Audio-visual engineering equipment: media-supplies
class MediaSupplies(object):
def media_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '視聽工程器材'
list['id'] = 'media-supplies'
media_supplies = {}
media_supplies['cable-tv-equip'] = '有線電視設備'
media_supplies['stage-engineering'] = '舞台工程'
media_supplies['audio-video-engineering'] = '視聽工程'
list['sub'] = media_supplies
return list
# Instruments: instrument
class Instrument(object):
def instrument() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '儀器'
list['id'] = 'instrument'
instrument = {}
instrument['optical'] = '光學儀器'
instrument['weighting'] = '度量衡儀器'
instrument['surveying'] = '測量儀器'
instrument['temp-humidity'] = '溫濕度儀器'
instrument['laboratory'] = '科學實驗室設備'
instrument['instrument'] = '儀器'
list['sub'] = instrument
return list
# Digital recording and playback equipment: digital-record-device
class DigitalRecordDevice(object):
def digital_record_device() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '數位錄放設備'
list['id'] = 'digital-record-device'
digital_record_device = {}
digital_record_device['digital-recording-equip'] = '數位錄放設備'
list['sub'] = digital_record_device
return list
# Audio-visual media: media
class Media(BaseCategory, CdAndTape, DvdAndVcd, RecordingVideo, Glasses, WatchesAndClocks,
Photography, MediaSupplies, Instrument, DigitalRecordDevice):
category_name = '聲光影視'
category_id = 'media'
```
#### File: sub_categories/society/healthcare.py
```python
from typing import Dict
from ..base_category import BaseCategory
# Health agencies and units: health-agencies
class HealthAgencies(object):
def health_agencies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '衛生機關及單位'
list['id'] = 'health-agencies'
health_agencies = {}
health_agencies['health-authorities'] = '衛生機關'
list['sub'] = health_agencies
return list
# Traditional Chinese medicine: tcm-healthcare
class TcmHealthcare(object):
def tcm_healthcare() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '中醫'
list['id'] = 'tcm-healthcare'
tcm_healthcare = {}
tcm_healthcare['Chinese-medicines'] = '中醫'
list['sub'] = tcm_healthcare
return list
# Western medicine: western-healthcare
class WesternHealthcare(object):
def western_healthcare() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '西醫'
list['id'] = 'western-healthcare'
western_healthcare = {}
western_healthcare['rehabilitation'] = '復健科'
western_healthcare['ophthalmology'] = '眼科'
western_healthcare['gynecology-and-obstetrics'] = '婦產科'
western_healthcare['otolaryngology'] = '耳鼻喉科'
western_healthcare['urology'] = '泌尿科'
western_healthcare['dermatology'] = '皮膚科'
western_healthcare['surgery'] = '外科'
western_healthcare['pediatric'] = '內兒科'
western_healthcare['dentistry'] = '牙科'
western_healthcare['physiology'] = '心理精神科'
western_healthcare['western-medicine'] = '西醫'
list['sub'] = western_healthcare
return list
# X-ray clinics and medical laboratories: x-ray
class XRay(object):
def x_ray() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = 'X光線院、醫學化驗院'
list['id'] = 'x-ray'
x_ray = {}
x_ray['laboratory'] = '檢驗所'
list['sub'] = x_ray
return list
# Drugs and medicinal materials: drugs-and-medicine
class DrugsAndMedicine(object):
def drugs_and_medicine() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '藥品及藥材'
list['id'] = 'drugs-and-medicine'
drugs_and_medicine = {}
drugs_and_medicine['pharmacy'] = '藥局'
drugs_and_medicine['Chinese-medicines'] = '中藥行'
drugs_and_medicine['pharmaceutical-companies'] = '藥商'
list['sub'] = drugs_and_medicine
return list
# Veterinary hospitals and medicines: veterinary
class Veterinary(object):
def veterinary() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '動物醫院及藥品'
list['id'] = 'veterinary'
veterinary = {}
veterinary['veterinary'] = '動物醫院'
veterinary['veterinary-medicine'] = '動物藥品'
list['sub'] = veterinary
return list
# Folk medicine and supplies: folk-medicine
class FolkMedicine(object):
def folk_medicine() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '民俗療法及用品'
list['id'] = 'folk-medicine'
folk_medicine = {}
folk_medicine['herbal-shops'] = '青草店'
folk_medicine['folk-medicine-equipment'] = '民俗療法設備'
folk_medicine['folk-medicine'] = '民俗療法'
list['sub'] = folk_medicine
return list
# Biotechnology and nanotechnology: biotechnology
class Biotechnology(object):
def biotechnology() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '生物科技、奈米科技'
list['id'] = 'biotechnology'
biotechnology = {}
biotechnology['nanotechnology'] = '奈米科技'
biotechnology['biotechnology'] = '生物科技'
list['sub'] = biotechnology
return list
# Medical equipment: medical-supplies
class MedicalSupplies(object):
def medical_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '醫療器材'
list['id'] = 'medical-supplies'
medical_supplies = {}
medical_supplies['dental-supplies-dental-cast'] = '齒模'
medical_supplies['medical-equipment'] = '醫療器材'
medical_supplies['medical-equip-manufacturers'] = '醫療器材製造'
medical_supplies['hospital-supplies'] = '醫院設備'
medical_supplies['rehabilitation-equipment'] = '復健器材'
list['sub'] = medical_supplies
return list
# Medical management services: medical-management
class MedicalManagement(object):
def medical_management() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '醫事管理服務'
list['id'] = 'medical-management'
medical_management = {}
medical_management['medical-management'] = '醫事管理'
list['sub'] = medical_management
return list
# Medical care and first aid: medical-care
class MedicalCare(object):
def medical_care() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '醫療救傷'
list['id'] = 'medical-care'
medical_care = {}
medical_care['first-aids'] = '醫療服務'
list['sub'] = medical_care
return list
# Medical remedial training: medical-remedial-training
class MedicalRemedialTraining(object):
def medical_remedial_training() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '醫療矯正訓練'
list['id'] = 'medical-remedial-training'
medical_remedial_training = {}
medical_remedial_training['medical-remedial-training'] = '醫療矯正訓練'
list['sub'] = medical_remedial_training
return list
# Nursing and care services: nursing-services
class NursingServices(object):
def nursing_services() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '安養看護服務'
list['id'] = 'nursing-services'
nursing_services = {}
nursing_services['child-care-centers'] = '保母'
nursing_services['maternity-centers'] = '坐月子中心'
nursing_services['rest-homes'] = '安養中心'
list['sub'] = nursing_services
return list
# Beauty and hair salons: beauty-salon
class BeautySalon(object):
def beauty_salon() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '美容美髮'
list['id'] = 'beauty-salon'
beauty_salon = {}
beauty_salon['wigs'] = '假髮'
beauty_salon['tattoo'] = '紋身'
beauty_salon['nail-art'] = '指甲彩繪'
beauty_salon['hair-salons'] = '沙龍美髮'
beauty_salon['beauty-shops'] = '美容護膚'
beauty_salon['salon-equip'] = '美容設備'
list['sub'] = beauty_salon
return list
# Weight loss and nutrition guidance: diet-and-nutrition-center
class DietAndNutritionCenter(object):
def diet_and_nutrition_center() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '減肥及營養指導'
list['id'] = 'diet-and-nutrition-center'
diet_and_nutrition_center = {}
diet_and_nutrition_center['diet-nutrition-center'] = '減肥塑身'
list['sub'] = diet_and_nutrition_center
return list
# Sanitary supplies: sanitary-supplies
class SanitarySupplies(object):
def sanitary_supplies() -> Dict[str, Dict[str, str]]:
list = {}
list['name'] = '衛生用品'
list['id'] = 'sanitary-supplies'
sanitary_supplies = {}
sanitary_supplies['masks-towels'] = '口罩、毛巾'
sanitary_supplies['personal-hygiene'] = '個人衛生用品'
sanitary_supplies['personal-hygiene-manufacturers'] = '衛生用品製造'
list['sub'] = sanitary_supplies
return list
# Healthcare: healthcare
class Healthcare(BaseCategory, HealthAgencies, TcmHealthcare, WesternHealthcare,
XRay, DrugsAndMedicine, Veterinary, FolkMedicine, Biotechnology,
MedicalSupplies, MedicalManagement, MedicalCare, MedicalRemedialTraining,
NursingServices, BeautySalon, DietAndNutritionCenter, SanitarySupplies
):
category_name = '醫療保健'
category_id = 'healthcare'
```
#### File: a1b2c3d4e5x/spider_iyp/main.py
```python
import sys
import concurrent.futures
from const.color import color
from const.meta import meta
from const.area_ids import area
from const.categories import categories
from core.spider_iyp import Spider_ipy
from utils.convert import csv_to_xlsx
class ArgvParser:
def __print_color_title(self, text: str):
print(color.BOLD + color.BLUE + text + color.END)
def __print_color_description(self, preText: str, postText: str):
print(' ' + color.BOLD + color.DARKCYAN + '--' + preText + ':' + color.END, postText)
    # Run the main crawler feature
def spider(self, main_category: str, sub_category: str, area_id: int = 0):
Spider_ipy.spider_list(main_category, sub_category, area_id)
def spider_all(self, main_category: str):
result = categories.sub_categories(main_category)
param = []
if None != result:
for mid_category in result:
for key, value in mid_category['sub'].items():
param.append(key)
if 0 != len(param):
helper = lambda scate: Spider_ipy.spider_list(main_category, scate)
with concurrent.futures.ThreadPoolExecutor(max_workers = 3) as executor:
executor.map(helper, param)
else:
self.__print_color_title('Not Found.')
    # List the main categories
def categories(self):
for part in categories.categories:
self.__print_color_title('\n' + part['name'])
for key, value in part['sub'].items():
print(' ' + value + ': ' + key)
    # List the subcategories under a main category
def subcategories(self, main_category: str):
result = categories.sub_categories(main_category)
if None != result:
for midCategory in result:
self.__print_color_title('\n' + midCategory['name'])
for key, value in midCategory['sub'].items():
print(' ' + value + ': ' + key)
else:
self.__print_color_title('Not Found.')
    # List area id information
def area(self):
for key, values in area.all.items():
self.__print_color_title('\n' + key)
for k, v in values.items():
print(' ' + v + ':', k)
#print('\n')
    # About this program
def about(self):
print('此爬蟲提供爬取公司黃頁相關資料')
print('網頁名稱: 中華黃頁網路電話簿')
print('網頁網址: https://www.iyp.com.tw')
    # Development info for this program
def info(self):
print('Build Date :', meta.BUILD_DATE)
print('Build Version :', 'v' + meta.BUILD_VERSION)
print('Developer Name :', meta.DEVERPER_NAME)
print('Developer Email:', meta.DEVERPER_EMAIL)
    # Convert the generated csv files to excel
def to_excel(self):
csv_to_xlsx()
    # Called when no arguments are given
def none(self):
self.__print_color_title('指令說明')
self.__print_color_description(self.spider.__name__ + ' [*主類別] [*子類別] [地區編號]', '爬取公司黃頁資料')
self.__print_color_description(self.spider.__name__ + ' [*主類別]', '爬取公司黃頁主類別下所有的子類別資料')
self.__print_color_description(self.categories.__name__, '列出主類別列表')
self.__print_color_description(self.subcategories.__name__ + ' [*主類別]', '列出主類別中的子類別列表')
self.__print_color_description(self.area.__name__, '列出地區編號資訊')
self.__print_color_description(self.about.__name__, '關於此程式的說明')
self.__print_color_description(self.info.__name__, '關於此程式的開發資訊')
self.__print_color_description(self.to_excel.__name__, '將產出的 csv 檔轉存成 excel')
# Check whether the input argument matches a command
def __argv_is_cmd(fn_name: str) -> bool:
if 2 <= len(sys.argv):
return ('--' + fn_name) == sys.argv[1]
return True
# Handle the argv arguments
def __parse_argv():
parser = ArgvParser()
if 2 == len(sys.argv):
if __argv_is_cmd(parser.categories.__name__):
return parser.categories()
elif __argv_is_cmd(parser.area.__name__):
return parser.area()
elif __argv_is_cmd(parser.about.__name__):
return parser.about()
elif __argv_is_cmd(parser.info.__name__):
return parser.info()
elif __argv_is_cmd(parser.to_excel.__name__):
return parser.to_excel()
elif 3 == len(sys.argv):
if __argv_is_cmd(parser.subcategories.__name__):
return parser.subcategories(sys.argv[2])
elif __argv_is_cmd(parser.spider.__name__):
return parser.spider_all(sys.argv[2])
elif 4 <= len(sys.argv):
if __argv_is_cmd(parser.spider.__name__):
if 5 == len(sys.argv):
return parser.spider(sys.argv[2], sys.argv[3], sys.argv[4])
else:
return parser.spider(sys.argv[2], sys.argv[3])
return parser.none()
# Main entry point
if __name__ == '__main__':
__parse_argv()
# Example of a listing that has an Email:
# https://www.iyp.com.tw/035182788
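# Illustrative usage sketch (comments only, not part of the original script),
# based on the argv handling in __parse_argv() above:
#   python main.py --categories                  # list main categories
#   python main.py --subcategories housing       # list subcategories of one main category
#   python main.py --spider housing decorations  # crawl a single subcategory
#   python main.py --to_excel                    # convert the generated csv files to excel
# 'housing' and 'decorations' are placeholder ids; run --categories to see the real ones.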
``` |
{
"source": "a1black/booktag",
"score": 3
} |
#### File: booktag/booktag/mediafile.py
```python
import re
from booktag import exceptions
from booktag import mutagenfacade
from booktag import osutils
from booktag import streams
from booktag.constants import AudioType, TagName
class AudioFile:
def __init__(self, path, format):
self._format = AudioType(format)
self._path = osutils.DirEntry(path)
self._audio_stream = None
self._image_stream = None
self._metadata = None
def __fspath__(self):
return self.path.path
def _lazy_load(self):
aformat, ainfo, ameta = mutagenfacade.from_file(self.path)
self._audio_stream = ainfo
self._format = AudioType(aformat)
self._metadata = ameta
def export_metadata(self):
mutagenfacade.export(self, self.metadata)
@property
def audio(self):
"""streams.AudioStream: Properties of audio stream in the file."""
if self._audio_stream is None:
self._lazy_load()
return self._audio_stream
@property
def cover(self):
"""streams.ImageStream: Embedded cover picture."""
metadata = self.metadata
return metadata.get(TagName.COVER, None)
@cover.setter
def cover(self, picture):
"""Sets album cover art."""
metadata = self.metadata
if picture is None:
del metadata[TagName.COVER]
elif isinstance(picture, bytes):
metadata[TagName.COVER] = streams.ImageStream.from_file(picture)
elif isinstance(picture, streams.ImageStream):
metadata[TagName.COVER] = picture
else:
raise TypeError('invalid type of image data: {0}'.format(
type(picture).__name__))
@cover.deleter
def cover(self):
"""Removes album cover art."""
del self.metadata[TagName.COVER]
@property
def format(self):
"""str: Audio format."""
return self._format
@property
def metadata(self):
"""streams.Metadata: Metadata tags stored in the file container."""
if self._metadata is None:
self._lazy_load()
return self._metadata
@property
def name(self):
"""str: Name of the file without extension."""
basename = self._path.name
return re.sub(r'\.(mp3|mp4|m4a|ogg)$', '', basename, flags=re.I)
@property
def path(self):
"""osutils.DirEntry: Pathname to the audio file."""
return self._path
@property
def size(self):
"""int: File size in bytes."""
return self._path.size(follow_symlinks=True)
@classmethod
def from_file(cls, path):
"""
Returns:
            AudioFile: A new instance of the class for the given audio file.
"""
filetype = osutils.is_audio(path, follow_symlinks=False)
if not filetype:
raise exceptions.NotAnAudioFileError(path)
return cls(path, format=filetype)
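# Minimal usage sketch (comments only, not from the original module). The path
# is a placeholder; actual behaviour depends on the osutils and mutagenfacade
# backends imported above.
#
#   audio = AudioFile.from_file('/tmp/audiobook/chapter01.mp3')
#   print(audio.name, audio.format)
#   audio.metadata        # first access triggers _lazy_load() via mutagenfacade
#   audio.export_metadata()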
```
#### File: booktag/booktag/settings.py
```python
import collections
from booktag import streams
class SettingContainer(collections.UserDict):
"""Class provides access to nested dictionaries using composite key.
Composite key is a sequence of keys separeted by a dot.
"""
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def __getitem__(self, key):
try:
dict_, node = self._descend(key)
return dict_[node]
except (KeyError, TypeError):
raise KeyError(key)
def __setitem__(self, key, value):
try:
dict_, node = self._descend(key, fillin=True)
if not isinstance(value, type(self)) and hasattr(value, 'keys'):
value = self.__class__(value)
dict_[node] = value
except (KeyError, TypeError):
raise KeyError(key)
def __delitem__(self, key):
try:
dict_, node = self._descend(key)
del dict_[node]
except (KeyError, ValueError):
raise KeyError(key)
def _descend(self, key, fillin=False):
path = list(filter(None, key.split('.')))
if not path:
raise KeyError(key)
end = path.pop()
root = self.data
for node in path:
try:
root = root[node]
except TypeError:
raise KeyError(key)
except KeyError:
if fillin:
root[node] = self.__class__()
root = root[node]
else:
raise
return root, end
def clear(self):
self.data.clear()
class Settings:
def __init__(self):
self.__dict__.update(_settings=SettingContainer())
self.clear()
def __contains__(self, key):
return key in self._settings
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError('{0!r} object has no attribute {1!r}'.format(
type(self).__name__, name))
def __setattr__(self, name, value):
        self._settings[name] = value
def __delattr__(self, name):
try:
del self[name]
except KeyError:
raise AttributeError('{0!r} object has no attribute {1!r}'.format(
type(self).__name__, name))
def __getitem__(self, key):
return self._settings[key]
def __setitem__(self, key, value):
self._settings[key] = value
def __delitem__(self, key):
del self._settings[key]
def _load_defaults(self):
self._settings.data['album_metadata'] = streams.Metadata()
self['metadata.tags.drop'] = set(['comment', 'legal', 'rating', 'url'])
self['metadata.cover.minsize'] = 500
self['metadata.cover.maxsize'] = 1000
self['metadata.cover.filesize'] = 250 * 1024
def clear(self):
self._settings.clear()
self._load_defaults()
def get(self, key, default=None):
return self._settings.get(key, default)
def update(self, *args, **kwargs):
self._settings.update(*args, **kwargs)
settings = Settings()
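# Illustrative sketch (not part of the original module, assumes the booktag
# package is importable): composite dotted keys resolve through nested
# SettingContainer instances, so the defaults from _load_defaults() can be
# read and overridden like this.
if __name__ == '__main__':
    settings['metadata.cover.minsize'] = 600
    assert settings.get('metadata.cover.minsize') == 600
    assert 'metadata.tags.drop' in settings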
``` |
{
"source": "a1ch3m1s7/StackOverflowL",
"score": 3
} |
#### File: v1/models/party_models.py
```python
import json
parties = []
class PartyModels():
def __init__(self):
self.parties = parties
def create_party(self, name, hqAddress, logoUrl):
party = {
"party_id": len(self.parties)+1,
"name": name,
"hqAddress": hqAddress,
"logoUrl": logoUrl,
}
self.parties.append(party)
return party
def get_name(self, name):
"""Get a party with a specific name."""
for party in self.parties:
if party['name'] == name:
return json.dumps(party, default=str)
def get_hqAddress(self, hqAddress):
"""Get party by hqAddress."""
for party in self.parties:
if party['hqAddress'] == hqAddress:
return json.dumps(party, default=str)
def get_logoUrl(self, logoUrl):
"""Get party by logoUrl."""
for party in self.parties:
if party['logoUrl'] == logoUrl:
return json.dumps(party, default=str)
def get_all_parties(self):
return self.parties
    def get_party_by_Id(self, party_id):
if parties:
for party in self.parties:
if party.get('party_id') == party_id:
return party
def remove_party(self, party_id):
if parties:
for party in self.parties:
if party.get('party_id') == party_id:
parties.remove(party)
return party
def update_party(self, party_id, details):
"""Updates an existing party."""
for party in self.parties:
if party['party_id'] == party_id:
name = details.get('name')
hqAddress = details.get('hqAddress')
logoUrl = details.get('logoUrl')
if name:
party['name'] = name
if hqAddress:
party['hqAddress'] = hqAddress
if logoUrl:
party['logoUrl'] = logoUrl
return party
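# Illustrative sketch (not part of the original module): exercising PartyModels
# against the module-level `parties` list; the party details are made up.
if __name__ == '__main__':
    models = PartyModels()
    models.create_party('Example Party', 'Nairobi', 'http://example.com/logo.png')
    print(models.get_party_by_Id(1))
    models.update_party(1, {'hqAddress': 'Mombasa'})
    models.remove_party(1)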
``` |
{
"source": "A1chemyStars/mmdetection-intermediate",
"score": 2
} |
#### File: mmdetection-intermediate/demo/data_vis.py
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.metrics import roc_curve, auc
from tsnecuda import TSNE
from MulticoreTSNE import MulticoreTSNE
# from bdd_kitti_extract import kitti_bdd_map
def auroc(y, score):
fpr, tpr, thresholds = roc_curve(y, score)
print(auc(fpr, tpr))
return fpr, tpr, thresholds
if __name__ == '__main__':
fc = np.load('fcs.npy')
logit = np.load('logits.npy')
soft = np.load('softmax.npy')
pred = np.load('preds.npy')
flag = np.load('flags.npy')
fc_bdd = np.load('city_fcs.npy')
logit_bdd = np.load('city_logits.npy')
soft_bdd = np.load('city_softmax.npy')
pred_bdd = np.load('city_preds.npy')
flag_bdd = np.load('city_flags.npy')
auroc(flag, soft)
auroc(flag_bdd, soft_bdd)
# reduce = PCA(n_components=2)
# reduce = TSNE(n_components=2)
reduce = MulticoreTSNE(n_components=2, n_jobs=-1)
fc_ld = reduce.fit_transform(fc_bdd)
corr = np.where(flag_bdd == 1)[0]
corr_pred = pred_bdd[corr]
fc_ldcorr = fc_ld[corr]
fig, ax = plt.subplots(figsize=(16, 12))
for i in range(8):
ax.scatter(fc_ldcorr[corr_pred == i, 0], fc_ldcorr[corr_pred == i, 1], s=4, alpha=0.3, label='{}'.format(i))
# ax.scatter(fc_ld[flag_bdd == 0, 0], fc_ld[flag_bdd == 0, 1], alpha=0.5, c='gray')
plt.legend()
plt.savefig('/home/kengo/Pictures/bdd.pdf')
plt.show()
```
#### File: mmdetection-intermediate/demo/kitti.py
```python
import os
import json
import torch
from util import ProgressBar, Hook
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
kitti_root_dir = '../data/kitti_test'
vkitti_root_dir = '../data/vkitti_test'
model_configs = {'faster_rcnn':
{'config_file': '../checkpoints/kitti/faster_rcnn/faster_rcnn_r50_fpn_1x_kitti.py',
'checkpoint': '../checkpoints/kitti/faster_rcnn/epoch_16.pth'},
'yolov3':
{'config_file': '../checkpoints/kitti/yolo/yolov3_d53_mstrain-608_273e_kitti.py',
'checkpoint': '../checkpoints/kitti/yolo/latest.pth'},
'retinanet_r50':
{'config_file': '../checkpoints/kitti/retinanet/r50/retinanet_r50_fpn_1x_kitti.py',
'checkpoint': '../checkpoints/kitti/retinanet/r50/latest.pth'},
'retinanet_r101': {'config_file': '', 'checkpoint': ''},
'ssd512': {'config_file': '', 'checkpoint': ''},
'yolox': {'config_file': '', 'checkpoint': ''},
'cornernet': {'config_file': '', 'checkpoint': ''},
'centernet': {'config_file': '', 'checkpoint': ''},
'faster_rcnn_bdd':
{'config_file': '../configs/bdd100k/faster_rcnn_r50_fpn_1x_det_bdd100k.py',
'checkpoint': '../checkpoints/bdd100k/faster_rcnn_r50_fpn_1x_det_bdd100k.pth'},
'faster_rcnn_cityscapes':
{'config_file': '../configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py',
'checkpoint': '../checkpoints/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502-829424c0.pth'}
}
def kitti_evaluate(detector='faster_rcnn'):
model = init_detector(model_configs[detector]['config_file'],
model_configs[detector]['checkpoint'], device='cuda:0')
hook_fpn = Hook(model.rpn_head.rpn_cls)
hook = Hook(model.roi_head.bbox_head.shared_fcs._modules['1'])
hook2 = Hook(model.roi_head.bbox_head.fc_cls)
hook_reg = Hook(model.roi_head.bbox_head.fc_reg)
dates = sorted(os.listdir(kitti_root_dir))
for date in dates:
date_path = os.path.join(kitti_root_dir, date)
drives = sorted(os.listdir(date_path))
for drive in drives:
infer_results = {'classes': {'Car': 0, 'Van': 1, 'Truck': 2, 'Pedestrian': 3,
'Person_sitting': 4, 'Cyclist': 5, 'Tram': 6, 'Misc': 7},
'results': []}
drive_path = os.path.join(date_path, drive)
image_select = os.path.join(drive_path, 'image_02/data')
images = sorted(os.listdir(image_select))
progress = ProgressBar(len(images), fmt=ProgressBar.FULL)
print('\nDate:', date, 'Drive:', drive)
for i, image in enumerate(images):
infer_frame = {'frame_id': i, 'objs': []}
image_path = os.path.join(image_select, image)
result = inference_detector(model, image_path)
hook.calc()
soft = torch.nn.Softmax(dim=1)
temp = soft(hook2.output)
temp = temp.data.cpu().numpy()
for cls in result:
infer_frame['objs'].append(cls.tolist())
infer_results['results'].append(infer_frame)
progress.current += 1
progress()
model.show_result(image_path, result, font_size=10, score_thr=0.7,
out_file=os.path.join('../results', date, drive, image))
with open(os.path.join('../results', date, '{}.json'.format(drive)), 'w') as f:
json.dump(infer_results, f)
def vkitti_evaluate(detector='faster_rcnn'):
model = init_detector(model_configs[detector]['config_file'],
model_configs[detector]['checkpoint'], device='cuda:0')
scenes = sorted(os.listdir(vkitti_root_dir))
for scene in scenes:
scene_path = os.path.join(vkitti_root_dir, scene)
variations = sorted(os.listdir(scene_path))
progress = ProgressBar(100, fmt=ProgressBar.FULL)
count = 0
print('\nScene:', scene)
for variation in variations:
variation_path = os.path.join(scene_path, variation)
image_select = os.path.join(variation_path, 'frames/rgb/Camera_0')
images = sorted(os.listdir(image_select))
for image in images:
image_path = os.path.join(image_select, image)
result = inference_detector(model, image_path)
count += 1
progress.current = count * 100 / (len(variations) * len(images))
progress()
model.show_result(image_path, result, font_size=10, score_thr=0.7,
out_file=os.path.join('../results', scene, variation, image))
if __name__ == '__main__':
kitti_evaluate('faster_rcnn')
``` |
{
"source": "a1d4r/devops",
"score": 2
} |
#### File: devops/app_python/__init__.py
```python
import sys
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata
def get_version() -> str:
try:
return importlib_metadata.version(__name__)
except importlib_metadata.PackageNotFoundError: # pragma: no cover
return "unknown"
version: str = get_version()
``` |
{
"source": "a1d4r/json2python-models",
"score": 2
} |
#### File: json2python-models/json_to_models/generator.py
```python
import re
from typing import Any, Callable, List, Optional, Pattern, Union
from .dynamic_typing import (
ComplexType,
DDict,
DList,
DOptional,
DUnion,
MetaData,
ModelPtr,
Null,
SingleType,
StringLiteral,
StringSerializable,
StringSerializableRegistry,
Unknown,
registry
)
_static_types = {float, bool, int}
class MetadataGenerator:
CONVERTER_TYPE = Optional[Callable[[str], Any]]
def __init__(
self,
str_types_registry: StringSerializableRegistry = None,
dict_keys_regex: List[Union[Pattern, str]] = None,
dict_keys_fields: List[str] = None
):
"""
        :param str_types_registry: StringSerializableRegistry instance. The default registry is used if None is passed.
        :param dict_keys_regex: List of regular expressions (compiled or not).
            If all keys of some dict match one of them, the dict is treated as a dict field
            rather than a nested model.
        :param dict_keys_fields: List of model field names that should be treated as dict fields.
"""
self.str_types_registry = str_types_registry if str_types_registry is not None else registry
self.dict_keys_regex = [re.compile(r) for r in dict_keys_regex] if dict_keys_regex else []
self.dict_keys_fields = set(dict_keys_fields or ())
def generate(self, *data_variants: dict) -> dict:
"""
Convert given list of data variants to metadata dict
"""
fields_sets = [self._convert(data) for data in data_variants]
fields = self.merge_field_sets(fields_sets)
return self.optimize_type(fields)
def _convert(self, data: dict):
"""
        Converts a dict of raw data into a dict of field metadata.
"""
fields = dict()
for key, value in data.items():
convert_dict = key not in self.dict_keys_fields
fields[key] = self._detect_type(value, convert_dict)
return fields
def _detect_type(self, value, convert_dict=True) -> MetaData:
"""
Converts json value to metadata
"""
# Simple types
t = type(value)
if t in _static_types:
return t
# List trying to yield nested type
elif t is list:
if value:
types = [self._detect_type(item) for item in value]
if len(types) > 1:
union = DUnion(*types)
if len(union.types) == 1:
return DList(*union.types)
return DList(union)
else:
return DList(*types)
else:
return DList(Unknown)
# Dict should be processed as another model if convert_dict is enabled
elif isinstance(value, dict):
if not value:
return DDict(Unknown)
for reg in self.dict_keys_regex:
if all(map(reg.match, value.keys())):
convert_dict = False
break
if convert_dict:
return self._convert(value)
else:
types = [self._detect_type(item) for item in value.values()]
if len(types) > 1:
union = DUnion(*types)
if len(union.types) == 1:
return DDict(*union.types)
return DDict(union)
else:
return DDict(*types)
# null interpreted as is and will be processed later on Union merge stage
elif value is None:
return Null
# string types trying to convert to other string-serializable types
else:
for t in self.str_types_registry:
try:
value = t.to_internal_value(value)
except ValueError:
continue
return t
return StringLiteral({value})
def merge_field_sets(self, field_sets: List[MetaData]) -> MetaData:
"""
        Merge field sets into one set of (key, metadata) pairs.
"""
fields: dict = {}
first = True
for model in field_sets:
fields_diff = set(fields.keys())
for name, field in model.items():
if name not in fields:
# New field
field = field if first or isinstance(field, DOptional) else DOptional(field)
else:
field_original = fields[name]
fields_diff.remove(name)
if isinstance(field_original, DOptional):
# Existing optional field
if field_original == field or field_original.type == field:
continue
field_original = field_original.type
field = DOptional(DUnion(
*(field.types if isinstance(field, DUnion) else [field]),
*(field_original.types if isinstance(field_original, DUnion) else [field_original])
))
if len(field.type) == 1:
field.type = field.type.types[0]
else:
if field_original == field or (isinstance(field, DOptional) and field_original == field.type):
continue
field = DUnion(
*(field.types if isinstance(field, DUnion) else [field]),
*(field_original.types if isinstance(field_original, DUnion) else [field_original])
)
if len(field) == 1:
field = field.types[0]
fields[name] = field
for name in fields_diff:
# Missing fields becomes optionals
if not isinstance(fields[name], DOptional):
fields[name] = DOptional(fields[name])
first = False
return fields
def optimize_type(self, meta: MetaData, process_model_ptr=False) -> MetaData:
"""
        Finds redundant types and replaces them with simpler ones.
        :param process_model_ptr: Controls whether ModelPtr instances are processed.
Default is False to prevent recursion cycles.
"""
if isinstance(meta, dict):
fields = {}
for k, v in meta.items():
fields[k] = self.optimize_type(v)
return fields
elif isinstance(meta, DUnion):
return self._optimize_union(meta)
elif isinstance(meta, DOptional):
t = self.optimize_type(meta.type)
if isinstance(t, DOptional):
t = t.type
return meta.replace(t)
elif isinstance(meta, SingleType) and (process_model_ptr or not isinstance(meta, ModelPtr)):
# Optimize nested type
return meta.replace(self.optimize_type(meta.type))
elif isinstance(meta, ComplexType):
# Optimize all nested types
return meta.replace([self.optimize_type(nested) for nested in meta])
elif isinstance(meta, StringLiteral):
if meta.overflowed or not meta.literals:
return str
return meta
def _optimize_union(self, t: DUnion):
# Replace DUnion of 1 element with this element
# if len(t) == 1:
# return t.types[0]
# Split nested types into categories
str_types: List[Union[type, StringSerializable]] = []
types_to_merge: List[dict] = []
list_types: List[DList] = []
dict_types: List[DDict] = []
other_types: List[MetaData] = []
for item in t.types:
if isinstance(item, DOptional):
item = item.type
other_types.append(Null)
if isinstance(item, dict):
types_to_merge.append(item)
elif item in self.str_types_registry or item is str:
str_types.append(item)
elif isinstance(item, DList):
list_types.append(item)
elif isinstance(item, DDict):
dict_types.append(item)
else:
other_types.append(item)
if int in other_types and float in other_types:
other_types.remove(int)
if types_to_merge:
other_types.append(self.merge_field_sets(types_to_merge))
for cls, iterable_types in ((DList, list_types), (DDict, dict_types)):
if iterable_types:
other_types.append(cls(DUnion(*(
t.type for t in iterable_types
))))
if str in str_types:
other_types.append(str)
elif str_types:
str_types = self.str_types_registry.resolve(*str_types)
# Replace str pseudo-types with <class 'str'> when they can not be resolved into single type
other_types.append(str if len(str_types) > 1 else next(iter(str_types)))
types = [self.optimize_type(t) for t in other_types]
if len(types) > 1:
if Unknown in types:
types.remove(Unknown)
optional = False
if Null in types:
optional = True
while Null in types:
types.remove(Null)
meta_type = DUnion(*types)
if len(meta_type.types) == 1:
meta_type = meta_type.types[0]
if optional:
return DOptional(meta_type)
else:
meta_type = types[0]
return meta_type
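# Minimal usage sketch (comments only, not part of the library source). Because
# this module uses relative imports it is meant to be used through the package:
#
#   from json_to_models.generator import MetadataGenerator
#
#   gen = MetadataGenerator()
#   fields = gen.generate(
#       {'id': 1, 'name': 'first', 'tags': ['a', 'b']},
#       {'id': 2, 'name': 'second'},
#   )
#   # 'tags' is present in only one variant, so it should come back wrapped in
#   # DOptional; the sample payloads are invented for the example.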
``` |
{
"source": "a1da4/bert-japanese",
"score": 2
} |
#### File: a1da4/bert-japanese/tokenization.py
```python
import collections
import logging
import os
import unicodedata
from transformers import BertTokenizer, WordpieceTokenizer
from transformers.tokenization_bert import load_vocab
logger = logging.getLogger(__name__)
class MecabBertTokenizer(BertTokenizer):
"""BERT tokenizer for Japanese text; MeCab tokenization + WordPiece"""
def __init__(self, vocab_file, do_lower_case=False,
do_basic_tokenize=True, do_wordpiece_tokenize=True,
mecab_dict_path=None, unk_token='[UNK]', sep_token='[SEP]',
pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
"""Constructs a MecabBertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file.
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization with MeCab before wordpiece.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
cls_token=cls_token, mask_token=mask_token, **kwargs)
self.vocab = load_vocab(vocab_file)
print(self.vocab)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
print(self.max_len_single_sentence)
print(self.max_len_sentences_pair)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'.".format(vocab_file))
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
self.do_wordpiece_tokenize = do_wordpiece_tokenize
if do_basic_tokenize:
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case,
mecab_dict_path=mecab_dict_path)
if do_wordpiece_tokenize:
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=self.unk_token)
def _tokenize(self, text):
if self.do_basic_tokenize:
tokens = self.basic_tokenizer.tokenize(text,
never_split=self.all_special_tokens)
else:
tokens = [text]
if self.do_wordpiece_tokenize:
split_tokens = [sub_token for token in tokens
for sub_token in self.wordpiece_tokenizer.tokenize(token)]
else:
split_tokens = tokens
return split_tokens
class MecabCharacterBertTokenizer(BertTokenizer):
"""BERT character tokenizer for with information of MeCab tokenization"""
def __init__(self, vocab_file, do_lower_case=False, do_basic_tokenize=True,
mecab_dict_path=None, unk_token='[UNK]', sep_token='[SEP]',
pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
"""Constructs a MecabCharacterBertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file.
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
Only has an effect when do_basic_tokenize=True.
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization with MeCab before wordpiece.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
"""
super(BertTokenizer, self).__init__(
unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
cls_token=cls_token, mask_token=mask_token, **kwargs)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'.".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = MecabBasicTokenizer(do_lower_case=do_lower_case,
mecab_dict_path=mecab_dict_path,
preserve_spaces=True)
self.wordpiece_tokenizer = CharacterTokenizer(vocab=self.vocab,
unk_token=self.unk_token,
with_markers=True)
def _convert_token_to_id(self, token):
"""Converts a token (str/unicode) to an id using the vocab."""
if token[:2] == '##':
token = token[2:]
return self.vocab.get(token, self.vocab.get(self.unk_token))
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) to a single string."""
out_string = ' '.join(tokens).replace('##', '').strip()
return out_string
class MecabBasicTokenizer(object):
"""Runs basic tokenization with MeCab morphological parser."""
def __init__(self, do_lower_case=False, never_split=None,
mecab_dict_path=None, preserve_spaces=False):
"""Constructs a MecabBasicTokenizer.
Args:
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input.
**mecab_dict_path**: (`optional`) string
Path to a directory of a MeCab dictionary.
**preserve_spaces**: (`optional`) boolean (default True)
Whether to preserve whitespaces in the output tokens.
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
import MeCab
if mecab_dict_path is not None:
self.mecab = MeCab.Tagger('-d {}'.format(mecab_dict_path))
else:
self.mecab = MeCab.Tagger()
self.preserve_spaces = preserve_spaces
def tokenize(self, text, never_split=None, with_info=False, **kwargs):
"""Tokenizes a piece of text."""
never_split = self.never_split + (never_split if never_split is not None else [])
text = unicodedata.normalize('NFKC', text)
tokens = []
token_infos = []
cursor = 0
for line in self.mecab.parse(text).split('\n'):
if line == 'EOS':
if self.preserve_spaces and len(text[cursor:]) > 0:
tokens.append(text[cursor:])
token_infos.append(None)
break
token, token_info = line.split('\t')
token_start = text.index(token, cursor)
token_end = token_start + len(token)
if self.preserve_spaces and cursor < token_start:
tokens.append(text[cursor:token_start])
token_infos.append(None)
if self.do_lower_case and token not in never_split:
token = token.lower()
tokens.append(token)
token_infos.append(token_info)
cursor = token_end
assert len(tokens) == len(token_infos)
if with_info:
return tokens, token_infos
else:
return tokens
class CharacterTokenizer(object):
"""Runs Character tokenziation."""
def __init__(self, vocab, unk_token,
max_input_chars_per_word=100, with_markers=True):
"""Constructs a CharacterTokenizer.
Args:
vocab: Vocabulary object.
unk_token: A special symbol for out-of-vocabulary token.
            with_markers: If True, "##" is prepended to each output character
                except the first one.
"""
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
self.with_markers = with_markers
def tokenize(self, text):
"""Tokenizes a piece of text into characters.
For example:
input = "apple"
output = ["a", "##p", "##p", "##l", "##e"] (if self.with_markers is True)
output = ["a", "p", "p", "l", "e"] (if self.with_markers is False)
Args:
text: A single token or whitespace separated tokens.
This should have already been passed through `BasicTokenizer`.
Returns:
A list of characters.
"""
output_tokens = []
for i, char in enumerate(text):
if char not in self.vocab:
output_tokens.append(self.unk_token)
continue
if self.with_markers and i != 0:
output_tokens.append('##' + char)
else:
output_tokens.append(char)
return output_tokens
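# Illustrative sketch (comments only, not part of the original file). A typical
# call, assuming a compatible vocab file and an installed MeCab dictionary:
#
#   tokenizer = MecabBertTokenizer('vocab.txt', mecab_dict_path=None)
#   tokens = tokenizer.tokenize('日本語の文章をトークン化する')
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#
# 'vocab.txt' is a placeholder path, not a file shipped with this repository.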
``` |
{
"source": "a1da4/pmi-semantic-difference",
"score": 2
} |
#### File: pmi-svd/constrained/train.py
```python
import argparse
import pickle
from model import SimplifiedDynamicWordEmbeddigs
from util import cos_sim, calculate_similarities, roc, plot_roc, plot_loss
def main(args):
print(args)
fp = open(args.id_to_word, 'rb')
id_to_word = pickle.load(fp)
time_bins = len(args.ppmi_pathes)
dev_words = []
with open(args.dev_list) as fp:
for line in fp:
word = line.strip()
dev_words.append(word)
model = SimplifiedDynamicWordEmbeddigs(time_bins=time_bins,
dim=args.dim,
tau=args.tau,
es=args.es)
model.load_ppmi_matrix(args.ppmi_pathes, len(id_to_word))
losses, best_loss, best_Ws, best_Us = model.train(args.n_iter, args.seed)
print(f'Best_loss: {losses.index(best_loss)}epoch, {best_loss}')
auc, fp, tp = roc(best_Ws, id_to_word)
print(f'auc_wo_norm: {auc}')
plot_roc(fp, tp, auc, args.tau, args.seed)
auc, fp, tp = roc(best_Ws, id_to_word, dev_words, with_norm=False)
print(f'auc_w_norm: {auc}')
    plot_roc(fp, tp, auc, args.tau, args.seed, with_norm=True)
plot_loss(losses, args.tau, args.seed)
fp = open(f'./Ws_d-{args.dim}_t-{args.tau}_seed-{args.seed}.pkl', 'wb')
pickle.dump(best_Ws, fp)
def cli_main():
parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--ppmi_pathes', nargs='*', help='paths of PPMI matrices.')
parser.add_argument('-s', '--seed', type=int, default=1, help='int, random seed.')
parser.add_argument('-d', '--dim', type=int, default=100, help='int, dimension of word embedding.')
parser.add_argument('-n', '--n_iter', type=int, default=5, help='int, iteration of training.')
    parser.add_argument('-t', '--tau', type=float, default=50, help='float, hyperparameter. Strength of the Kalman filter on the main embedding.')
parser.add_argument('-e', '--es', type=int, default=3, help='int, patients of early stopping.')
parser.add_argument('-p', '--id_to_word', help='path of id_to_word dictionary')
parser.add_argument("-l", "--dev_list", help="path of dev word list")
args = parser.parse_args()
main(args)
if __name__ == '__main__':
cli_main()
```
#### File: preprocess/coha-preprocess/utils.py
```python
import re
import string
# 01234 + '-'|'.'|',' + 56789
NUM = re.compile(r"^\d*[-\./,]*\d+$")
PUNCT = set(string.punctuation)
PUNCT.add("--")
AFFIX = set(["n't", "'s", "'d", "'t"])
def process_lemma_line(line):
"""preprocess
:return lemma:
:return lemma_pos:
:return pos_tags:
"""
info = line.split()
if len(info) != 3:
return None, None, None, None
word = clean_word_permissive(info[0])
if word == None:
return None, None, None, None
lemma = info[1]
pos_tags = clean_pos(info[2])
if pos_tags[0] == "":
lemma_pos = None
else:
lemma_pos = lemma + "_" + pos_tags[0][:2]
return word, lemma, lemma_pos, pos_tags
def clean_word_permissive(word):
"""clean word
:return word: number is tokenized, and (PUNCT, AFFIX) are reshaped
"""
if word == "@" or word == "<p>":
return None
elif word in PUNCT:
return None
elif word in AFFIX:
return None
else:
word = word.strip().strip("*").lower()
if NUM.match(word):
word = "<NUM>"
return word
def clean_pos(pos):
"""clean and split tags
:return tags: list of tags in a word
"""
tags = []
for tag in pos.split("_"):
tag = tag.strip("@")
tag = tag.strip("%")
tags.append(tag)
return tags
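# Small illustrative check (not part of the original module) showing how the
# helpers above behave on a made-up COHA-style lemma line.
if __name__ == "__main__":
    word, lemma, lemma_pos, tags = process_lemma_line("Walked\twalk\tvvd")
    assert word == "walked" and lemma_pos == "walk_vv"
    assert clean_word_permissive("3,141") == "<NUM>"
    assert clean_pos("nn1_@np1") == ["nn1", "np1"]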
``` |
{
"source": "a1eaiactaest/aclass",
"score": 3
} |
#### File: aclass/aclass/__main__.py
```python
import sys
import os
current = os.path.dirname(os.path.realpath(__file__))
def join(subject, data):
import json
import webbrowser
url = data[subject]
webbrowser.open(url,new=0,autoraise=False)
def helpm():
help_message = '''
usage: aclass [OPTION] {ARGUMENT}
Join your classes.
For usage and help visit: https://github.com/a1eaiactaest/aclass
arguments:
-h, --help display this help
--configure configure aclass by writing file with your classes.
--join {class} join to your class. Passing object from classes.json file as argument.
--edit edit classes.json file, it contains links to your online classes
'''
print(help_message)
def main():
try:
argument = sys.argv[1]
if argument == '--configure':
import urllib.request
# download file from gh repo and open it in vi for user to edit it
url = 'https://raw.githubusercontent.com/a1eaiactaest/aclass/master/docs/classes.json'
urllib.request.urlretrieve(url, f'{current}/classes.json')
os.system(f'vi {current}/classes.json')
            print('Configuration complete. Running this process again will overwrite existing data. Run --edit to edit this file again.')
if argument == '--join':
# create second argument and take value from json file
import json
key = sys.argv[2]
data = json.load(open(f'{current}/classes.json', 'r'))
if key in data:
join(key, data)
else:
helpm()
if argument == '--edit':
            # works the same as --configure but doesn't fetch classes.json from the repo
os.system(f'vi {current}/classes.json')
            print(f'Your classes.json file is in the {current} directory')
if argument == '--help' or argument == '-h':
helpm()
except IndexError:
helpm()
if __name__ == "__main__":
main()
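# Illustrative CLI usage (comments only, not part of the original script), based
# on the argument handling in main():
#   aclass --configure     # fetch classes.json and open it in vi
#   aclass --join math     # open the URL stored under the 'math' key
#   aclass --edit          # re-edit classes.json
# 'math' is a placeholder key; it must exist in your classes.json file.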
``` |
{
"source": "a1eaiactaest/dht11",
"score": 3
} |
#### File: api/common/parser.py
```python
import time
import json
from typing import List, Iterable
def parse_to_dict(input_data: List[str]) -> str:
# check if input_data is VALID here or before calling this function
parsed_data = {
"time": input_data[0],
"id": input_data[1],
"air_pres": input_data[2],
"voc": input_data[3], # it's volatile organic compound
"air_temp": input_data[4],
"air_hum": input_data[5],
"gnd_temp": input_data[6],
"gnd_hum": input_data[7],
}
return parsed_data
# [(3,), (5,), (11,), (103,)]
def parse_stations(input_data: List[tuple]) -> str:
ret = []
for station in input_data:
ret.append(station[0])
return str(ret)
def parse_to_list(input_data: List[str]) -> List[str]:
parsed_data = [str(int(time.time()))] + input_data
return parsed_data
def pretty_json(parsed_data: dict) -> None:
    # parse_to_dict() returns a plain dict, so dump it directly rather than
    # round-tripping through json.loads()
    print(json.dumps(parsed_data, indent=4, sort_keys=False))
def pretty_db_dump(dump: List[tuple]) -> None:
# debug and developement use recommended
for subd in dump:
print(subd)
pretty_json(parse_to_dict(subd))
if __name__ == "__main__":
example_data = ['103', '949.00', '158.00', '23.00', '50.00', '2.00', '84.00']
    pretty_json(parse_to_dict(parse_to_list(example_data)))
``` |
{
"source": "a1eaiactaest/osmon",
"score": 3
} |
#### File: osmon/tests/t1.py
```python
import argparse
import sys
parser = argparse.ArgumentParser(description='test')
parser.add_argument('-t', action='store_true')
res = parser.parse_args()
def test():
x = 1+1
print(x)
print(res.t)
if res.t:
test()
``` |
{
"source": "a1eko/treem",
"score": 3
} |
#### File: treem/commands/repair.py
```python
import math
from itertools import chain
import numpy as np
from treem import Morph, SWC
from treem.utils.geom import repair_branch, sample, norm, rotation
def repair(args):
"""Corrects morphology reconstruction at the given nodes."""
# pylint: disable=invalid-name
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# pylint: disable=cell-var-from-loop
# pylint: disable=expression-not-assigned
vprint = print if args.verbose else lambda *a, **k: None
morph = Morph(args.file)
err = 0
if args.translate:
morph.data[:, SWC.XYZ] += np.array(args.translate)
if args.rotate:
morph.rotate([1, 0, 0], args.rotate[0]/180*math.pi)
morph.rotate([0, 1, 0], args.rotate[1]/180*math.pi)
morph.rotate([0, 0, 1], args.rotate[2]/180*math.pi)
if args.shrink_xy:
scale = np.array([args.shrink_xy, args.shrink_xy, 1])
origin = morph.root.coord().copy()
for node in morph.root.walk():
coord = node.coord()
coord *= scale
shift = origin - morph.root.coord()
morph.translate(shift)
if args.shrink:
if args.bottom_up:
bottom = max(x.coord()[2] for x in morph.root.walk())
else:
bottom = min(x.coord()[2] for x in morph.root.walk())
for node in morph.root.walk():
z = node.coord()[2]
z = bottom + args.shrink * (z - bottom)
node.v[SWC.Z] = z
if args.zjump:
nodes = [x for x in morph.root.walk() if x.ident() in args.zjump]
for node in nodes:
jump = node.parent.coord()[2] - node.coord()[2]
if args.zjump_mode == 'align':
shift = [0, 0, jump]
morph.translate(shift, node)
elif args.zjump_mode == 'split':
shift = [0, 0, jump/2]
jump_sec = list(node.section())
for split_node in jump_sec[:int(len(jump_sec)/2)]:
morph.move(shift, split_node)
elif args.zjump_mode == 'tilt':
parent = node.parent
dist = max(norm(parent.coord()-jump_node.coord())
for jump_node in node.leaves())
leng = max(norm(node.coord()-jump_node.coord())
for jump_node in node.leaves())
leaf = [jump_node for jump_node in node.leaves()
if norm(node.coord()-jump_node.coord()) == leng][0]
vdir = leaf.coord() - node.parent.coord()
shift = [0, 0, jump*leng/dist]
morph.translate(shift, node)
udir = leaf.coord() - node.coord()
axis, angle = rotation(udir, vdir)
morph.rotate(axis, angle, node)
elif args.zjump_mode == 'join':
parent = node.parent
dist = max(norm(parent.coord()-jump_node.coord())
for jump_node in node.leaves())
leng = max(norm(node.coord()-jump_node.coord())
for jump_node in node.leaves())
leaf = [jump_node for jump_node in node.leaves()
if norm(node.coord()-jump_node.coord()) == leng][0]
vdir = leaf.coord() - node.parent.coord()
shift = [0, 0, jump*leng/dist]
morph.translate(shift, node)
udir = leaf.coord() - node.coord()
axis, angle = rotation(udir, vdir)
morph.rotate(axis, angle, node)
start = list(node.section(reverse=True))[-1].parent
dist = max(norm(start.coord()-jump_node.coord())
for jump_node in node.leaves())
leng = morph.length(node.parent.section(reverse=True))
vdir = leaf.coord() - start.coord()
shift = [0, 0, -jump*leng/dist]
morph.translate(shift, node.parent)
udir = leaf.coord() - node.coord()
axis, angle = rotation(udir, vdir)
morph.rotate(axis, angle, node)
if args.pool:
pool = [Morph(f) for f in args.pool]
if args.diam:
nodes = [x for x in morph.root.walk() if x.ident() in args.diam]
if args.pool:
types = {x.type() for x in nodes}
if args.diam_mode == 'joint':
for node in nodes:
r = 0
if node.parent.type() != SWC.SOMA:
r += node.parent.radius()
r /= 2
if not node.is_fork():
r += node.siblings[0].radius()
r /= 2
if r:
node.v[SWC.R] = r
else:
vprint(f'diam in node {node.ident()} not repaired')
err += 1
elif args.diam_mode == 'sec':
for node in nodes:
sec = list(node.section(reverse=True))
sec = list(sec[-1].section())
r = morph.radii(sec).mean()
node.v[SWC.R] = r
elif args.diam_mode == 'order':
for node in nodes:
point_type = node.type()
order = node.order()
if args.pool:
radii = [m.radii(sec).mean() for m in pool
for sec in m.root.sections()
if sec[0].type() in types
and sec[0].order() == order]
if radii:
r = np.mean(radii)
node.v[SWC.R] = r
else:
vprint(f'diam in node {node.ident()} (order {order}) '
f'not repaired')
err += 1
else:
r = np.array([morph.radii(sec).mean()
for sec in morph.root.sections()
if sec[0].type() == point_type
and sec[0].order() == order]).mean()
node.v[SWC.R] = r
elif args.diam_mode == 'breadth':
for node in nodes:
point_type = node.type()
breadth = node.breadth()
if args.pool:
radii = [m.radii(sec).mean() for m in pool
for sec in m.root.sections()
if sec[0].type() in types
and sec[0].breadth() == breadth]
if radii:
r = np.mean(radii)
node.v[SWC.R] = r
else:
vprint(f'diam in node {node.ident()} '
f'(breadth {breadth}) not repaired')
err += 1
else:
r = np.array([morph.radii(sec).mean()
for sec in morph.root.sections()
if sec[0].type() == point_type
and sec[0].breadth() == breadth]).mean()
node.v[SWC.R] = r
if args.seed:
np.random.seed(args.seed)
if args.cut: # pylint: disable=too-many-nested-blocks
types = {x.type() for x in morph.root.walk() if x.ident() in args.cut}
for point_type in types:
intact_branches = dict()
if args.pool:
for rec in pool:
sections = filter(lambda x: x[0].type() == point_type,
rec.root.sections())
nodes = chain(x[0] for x in sections)
for node in nodes:
order = node.order()
if order not in intact_branches:
intact_branches[order] = list()
intact_branches[order].append((rec, node))
else:
sections = filter(lambda x: x[0].type() == point_type,
morph.root.sections())
nodes = chain(x[0] for x in sections)
def is_intact(tree, cuts):
leaves = [x.ident() for x in tree.leaves()]
return set(leaves).isdisjoint(cuts)
nodes = filter(lambda x: is_intact(x, args.cut), nodes)
for node in nodes:
order = node.order()
if order not in intact_branches:
intact_branches[order] = list()
intact_branches[order].append((morph, node))
nodes = [x for x in morph.root.walk() if x.type() == point_type
and x.ident() in args.cut]
for node in nodes:
order = node.order()
vprint(f'repairing node {node.ident()} (order {order})',
end=' ')
if order in intact_branches:
idx = np.random.choice(len(intact_branches[order]))
rec, rep = intact_branches[order][idx]
vprint(f'using {rep.ident()} (order {order}) ...', end=' ')
done = repair_branch(morph, node, rec, rep,
force=args.force_repair)
err += 1 if not done else 0
vprint('done') if done else vprint('not repaired')
elif order - 1 in intact_branches:
idx = np.random.choice(len(intact_branches[order-1]))
rec, rep = intact_branches[order-1][idx]
vprint(f'using {rep.ident()} (order {order-1}) ...',
end=' ')
done = repair_branch(morph, node, rec, rep,
force=args.force_repair)
err += 1 if not done else 0
vprint('done') if done else vprint('not repaired')
elif args.force_repair:
if intact_branches:
order = np.random.choice(list(intact_branches.keys()))
idx = np.random.choice(len(intact_branches[order]))
rec, rep = intact_branches[order][idx]
vprint(f'using {rep.ident()} (order {order}) ...',
end=' ')
done = repair_branch(morph, node, rec, rep, force=True)
err += 1 if not done else 0
vprint('done') if done else vprint('not repaired')
else:
err += 1
vprint('... no intact branches, not repaired')
else:
err += 1
vprint('... not repaired')
if args.delete and not args.cut:
nodes = [x for x in morph.root.walk() if x.ident() in args.delete]
for node in nodes:
morph.delete(node)
if args.delete or args.cut:
morph = Morph(data=morph.data)
if args.res:
ident = 1
data = list()
idmap = {-1: -1}
for sec in filter(lambda x: x[0].type() == SWC.SOMA,
morph.root.sections()):
for node in sec:
v = node.v.copy()
i, p = v[SWC.I].astype(int), v[SWC.P].astype(int)
v[SWC.I], v[SWC.P] = ident, idmap[p]
idmap[i] = ident
data.append(v)
ident += 1
for sec in filter(lambda x: x[0].type() in
set(SWC.TYPES).difference((SWC.SOMA,)),
morph.root.sections()):
length = morph.length(sec)
points = morph.points(sec)
# pylint: disable=unsubscriptable-object
points = np.insert(points, 0, sec[0].parent.v[SWC.XYZR], axis=0)
points = sample(points, np.ceil(length/args.res).astype(int))
points = points[1:]
start = True
for ident, point in enumerate(points, ident):
x, y, z, r = point
pid = idmap[sec[0].parent_ident()] if start else ident - 1
v = np.array([ident, sec[0].type(), x, y, z, r, pid])
start = False if start else start
data.append(v)
idmap[sec[-1].v[SWC.I]] = ident
ident += 1
morph = Morph(data=np.array(data))
if args.center:
morph.data[:, SWC.XYZ] -= morph.root.coord()
morph.save(args.out)
return err
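# Added illustration: the `sample(points, n)` helper used above is not shown in this file.
# A minimal sketch of what it plausibly does (resample an (N, 4) array of x/y/z/radius
# points to `n` points spaced evenly by arc length) is given below purely as an assumption,
# not as the author's implementation.
def _sample_sketch(points, n):
    import numpy as np  # local import so the sketch is self-contained
    pts = np.asarray(points, dtype=float)
    # cumulative arc length along the x/y/z polyline
    seg = np.linalg.norm(np.diff(pts[:, :3], axis=0), axis=1)
    t = np.concatenate(([0.0], np.cumsum(seg)))
    new_t = np.linspace(0.0, t[-1], max(int(n), 2))
    # interpolate each column (x, y, z, r) at the new arc-length positions
    return np.column_stack([np.interp(new_t, t, pts[:, c]) for c in range(pts.shape[1])])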
``` |
{
"source": "A1eksAwP/GB-Internet-Store",
"score": 2
} |
#### File: geekshop/cartapp/models.py
```python
from django.db import models
from django.conf import settings
from mainapp.models import Product
class CartQuerySet(models.query.QuerySet):
def delete(self, *args, **kwargs):
for item in self:
item.product.quantity += item.quantity
item.product.save()
super().delete(*args, **kwargs)
class CartManager(models.Manager):
def count(self):
return len(self.all())
def sum(self):
cart_sum = 0
for item in self.all():
cart_sum += item.product.price * item.quantity
return cart_sum
class Cart(models.Model):
class Meta:
unique_together = ['user','product']
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='cart')
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(verbose_name='количество', default=0)
add_datetime = models.DateTimeField(verbose_name='время', auto_now_add=True)
objects = CartManager()
def save(self, *args, **kwargs):
if self.pk:
old_cart = Cart.objects.get(pk=self.pk)
self.product.quantity -= self.quantity - old_cart.quantity
else:
self.product.quantity -= self.quantity
self.product.save()
super().save(*args, **kwargs)
    def delete(self, *args, **kwargs):
        self.product.quantity += self.quantity
        self.product.save()
        super().delete(*args, **kwargs)
@property
def cost(self):
return self.product.price*self.quantity
def __str__(self):
return f'{self.product.name} - {self.quantity}шт'
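# Illustrative usage (added; not part of the original app). Saving a Cart row reserves
# stock on the related Product, and deleting the row returns it:
#
#   item = Cart(user=some_user, product=some_product, quantity=2)
#   item.save()      # some_product.quantity is reduced by 2
#   item.delete()    # some_product.quantity is increased back by 2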
```
#### File: geekshop/mainapp/views.py
```python
import json
from django.shortcuts import get_object_or_404, render
from .models import ProductCategory, Product
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
def main(request):
with open ('./products.json', 'r', encoding='utf-8') as file:
jsonproducts = json.load(file)
title = 'главная'
products = Product.objects.all()
categorys = ProductCategory.objects.all()
return render(request, 'mainapp/index.html', context = {
'title': title,
'products': jsonproducts,
'foods': products,
'categorys': categorys,
})
@login_required
def get_product(request, store_id):
title = 'Страница продукта'
products = Product.objects.all()
item = get_object_or_404(Product, pk=store_id)
content = {
'title': title,
        'category': item.category,
'products': products,
'item': item,
}
return render(request, 'mainapp/product_about.html', content)
@login_required
def products(request, pk=0, page=1):
title = 'продукты'
links_menu = ProductCategory.objects.all()
if pk is not None:
if pk == 0:
products = Product.objects.filter(category__is_active=True).order_by('price')
category = {
'pk': 0,
'name': 'все',
}
else:
category = get_object_or_404(ProductCategory, pk=pk)
products = Product.objects.filter(category__pk=pk).order_by('price')
paginator = Paginator(products, 2)
try:
products_paginator = paginator.page(page)
except PageNotAnInteger:
products_paginator = paginator.page(1)
except EmptyPage:
products_paginator = paginator.page(paginator.num_pages)
content = {
'title': title,
'links_menu': links_menu,
'category': category,
'products': products_paginator,
'page': products_paginator,
'paginator': paginator,
}
return render(request, 'mainapp/products_list.html', content)
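# Hypothetical URL patterns (added for illustration only; the project's real urls.py is not
# shown here) that would match the pk/page signature of the `products` view above:
#
#   path('catalog/', views.products, name='products'),
#   path('catalog/<int:pk>/', views.products, name='category'),
#   path('catalog/<int:pk>/page/<int:page>/', views.products, name='page'),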
def category(request, pk):
    return products(request, pk)
def contact(request):
return render(request, 'mainapp/contact.html', context = {
'title':'Контакты',
})
def about(request):
return render(request, 'mainapp/about.html', context = {
'title':'О нас',
})
def base(request):
return render(request, 'mainapp/base.html', context= {
'title':'!секретная страница!'
})
``` |
{
"source": "A1essandr0/vkgroups",
"score": 3
} |
#### File: A1essandr0/vkgroups/data_prep.py
```python
import pandas as pd
from typing import Union
from config import csvpath
Tablename = Union[str, pd.DataFrame]
def subscribers_tables_merge(tablename1: Tablename, tablename2: Tablename, csv_path=csvpath, verbose=True):
"""
Сводит таблицы, полученные загрузчиком, в одну. Может принимать pandas.DataFrame или имя группы, в этом
случае группа должна быть в списке групп, а соответствующий файл - в <csv_path>
"""
if isinstance(tablename1, pd.DataFrame):
table1 = tablename1
else:
table1 = pd.read_csv(csv_path + tablename1 + '.csv', sep=";", header=0, dtype=str)
if isinstance(tablename2, pd.DataFrame):
table2 = tablename2
else:
table2 = pd.read_csv(csv_path + tablename2 + '.csv', sep=";", header=0, dtype=str)
concatenated = table1.append(table2, ignore_index=True)
    # Identify users subscribed to more than one group.
    # The check gs_x != gs_x tests whether the value is NaN.
    outer_joined = pd.merge(table1[['id', 'group_subscribed']],
                            table2[['id', 'group_subscribed']],
                            on='id', how='outer')
outer_joined['groups'] = outer_joined['group_subscribed_x'] + ',' + outer_joined['group_subscribed_y']
outer_joined.loc[ outer_joined.group_subscribed_x != outer_joined.group_subscribed_x,
'groups'] = outer_joined.group_subscribed_y
outer_joined.loc[ outer_joined.group_subscribed_y != outer_joined.group_subscribed_y,
'groups'] = outer_joined.group_subscribed_x
    # Merge everything together and clean up
    left_joined = pd.merge(concatenated, outer_joined[['id', 'groups']], on='id', how='left')
left_joined['group_subscribed'] = left_joined['groups']
L = left_joined.drop_duplicates('id')
    if verbose:
        print("{0} and {1} processed".format(str(tablename1), str(tablename2)))
return L[L.columns[0:6]]
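# Illustrative usage (added; the group names below are the ones that appear in tests.py and
# are only examples — each must exist as a CSV file in csvpath when passed as a string):
#
#   merged = subscribers_tables_merge('livejournal', 'championat')
#   merged = subscribers_tables_merge(merged, 'afisha', verbose=False)
#   merged.to_csv(csvpath + 'merged.csv', sep=';', index=False)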
if __name__ == "__main__":
print("Этот модуль должен вызываться из другой программы")
```
#### File: A1essandr0/vkgroups/tests.py
```python
import unittest
import pandas as pd
import data_loader
import data_loader_async
import data_prep
from lib import vkApiRequestSync
test_group_name = 'angelvivaldi'
class AsyncTestRamblerGroups(unittest.IsolatedAsyncioTestCase):
    # Test the asynchronous loader against a sample group
async def test_async_data_load(self):
subscribers_names = await data_loader_async.async_request_fixed_params(group_id=test_group_name, offset=0, verbose=False)
self.assertIn('first_name', subscribers_names['items'][1])
self.assertIn('last_name', subscribers_names['items'][1])
class TestRamblerGroups(unittest.TestCase):
"""
Для тестов нужно нормальное интернет-соединение и актуальный access_token к VK api
"""
def test_api_response(self):
        # Check that a well-formed response comes back
groups_info_parameters = {
'group_id': test_group_name,
'fields': ",".join(data_loader.group_data_fields),
'access_token': data_loader.access_token, 'v': data_loader.api_version}
subscribers_count = vkApiRequestSync(api_method="groups.getById", parameters=groups_info_parameters)
self.assertIsInstance(subscribers_count[0]['members_count'], int)
def test_data_load(self):
        # Test the loader against a sample group
group_member_parameters = {
'group_id': test_group_name,
'fields': ",".join(data_loader.user_data_fields),
'access_token': data_loader.access_token, 'v': data_loader.api_version,
'sort': 'id_asc', 'count': 1000, 'offset': 0
}
subscribers_names = vkApiRequestSync(api_method="groups.getMembers", parameters=group_member_parameters)
self.assertIn('first_name', subscribers_names['items'][1])
self.assertIn('last_name', subscribers_names['items'][1])
def test_data_merge(self):
        # Check that the tables merge correctly
lj = pd.DataFrame({ 'id': ['219', '512', '550', '628', '834'],
'first_name': ['Роман', 'Егор', 'Назарій', 'Василий', 'Сергей'],
'last_name': ['Акамёлков', 'Деметрадзе', 'Куля', 'Ефанов', 'Тарасов'],
'bdate': ['NULL', 'NULL', 'NULL', 'NULL', 'NULL'],
'city': ['NULL', 'NULL', 'Львов', 'Москва', 'Санкт-Петербург'],
'group_subscribed': ['livejournal', 'livejournal', 'livejournal', 'livejournal', 'livejournal'] })
champ = pd.DataFrame({ 'id': ['219', '309', '347', '348', '374'],
'first_name': ['Роман', 'Ilya', 'Андрей', 'Галина', 'Сергей'],
'last_name': ['Акамёлков', 'Krivonogov', 'Бойко', 'Румянцева', 'Волошин'],
'bdate': ['NULL', '18.10.1988', 'NULL', 'NULL', '3.3.1985'],
'city': ['NULL', 'Санкт-Петербург', 'NULL', 'NULL', 'Санкт-Петербург'],
'group_subscribed': ['championat', 'championat', 'championat', 'championat', 'championat'] })
afisha = pd.DataFrame({ 'id': ['109', '348', '619', '628', '834'],
'first_name': ['Ольга', 'Галина', 'Алексей', 'Василий', 'Сергей'],
'last_name': ['Горюнова', 'Румянцева', 'Бардаш', 'Ефанов', 'Тарасов'],
'bdate': ['22.3', 'NULL', 'NULL', 'NULL', 'NULL'],
'city': ['Санкт-Петербург', 'NULL', 'NULL', 'Москва', 'Санкт-Петербург'],
'group_subscribed': ['afisha', 'afisha', 'afisha', 'afisha', 'afisha'] })
resulting_groups = ['afisha', 'afisha',
'championat', 'championat', 'championat',
'championat,afisha',
'livejournal', 'livejournal',
'livejournal,afisha', 'livejournal,afisha',
'livejournal,championat']
merged = data_prep.subscribers_tables_merge(lj, champ, verbose=False)
merged = data_prep.subscribers_tables_merge(merged, afisha, verbose=False)
self.assertEqual(sorted(list(merged['group_subscribed'])), resulting_groups)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "a1ex4/ownfoil",
"score": 3
} |
#### File: ownfoil/libs/gen_shop.py
```python
import os, json, sys, time
from consts import *
from jsonc_parser.parser import JsoncParser
import logging
logging.basicConfig(format='%(asctime)s | %(levelname)s: %(message)s', level=logging.DEBUG)
path = sys.argv[1]
def getDirsAndFiles(path):
entries = os.listdir(path)
allFiles = list()
allDirs = list()
for entry in entries:
fullPath = os.path.join(path, entry)
if os.path.isdir(fullPath):
allDirs.append(fullPath)
dirs, files = getDirsAndFiles(fullPath)
allDirs += dirs
allFiles += files
else:
if fullPath.split('.')[-1] in valid_ext:
allFiles.append(fullPath)
return allDirs, allFiles
while True:
logging.info(f'Start scanning directory "{path}"')
dirs = []
games = []
    shop = dict(default_shop)  # copy so the shared default template is not mutated between scans
template_file = os.path.join(path, template_name)
if not os.path.isfile(template_file):
logging.warning(f'Template file {template_file} not found, will use default shop template')
else:
try:
shop = JsoncParser.parse_file(template_file)
except Exception as e:
logging.warning(f'Error parsing template file {template_file}, will use default shop template, error was:\n{e}')
dirs, files = getDirsAndFiles(path)
rel_dirs = [os.path.join('..', os.path.relpath(s, path)) for s in dirs]
rel_files = [os.path.join('..', os.path.relpath(s, path)) for s in files]
logging.info(f'Found {len(dirs)} directories, {len(files)} game/save files')
for game, rel_path in zip(files, rel_files):
size = round(os.path.getsize(game))
games.append(
{
'url': rel_path,
'size': size
})
shop['directories'] = rel_dirs
shop['files'] = games
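    # For illustration (added): with the fields set above, the generated shop file looks
    # roughly like this (the paths below are hypothetical examples):
    #   { ...fields from the template or default_shop...,
    #     "directories": ["../Games"],
    #     "files": [{"url": "../Games/title.nsp", "size": 123456}] }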
for a in ['json', 'tfl']:
out_file = os.path.join(path, f'shop.{a}')
try:
with open(out_file, 'w') as f:
json.dump(shop, f, indent=4)
logging.info(f'Successfully wrote {out_file}')
except Exception as e:
logging.error(f'Failed to write {out_file}, error was:\n{e}')
time.sleep(scan_interval * 60)
``` |
{
"source": "A1exander-Pro/real_estate_lesson",
"score": 2
} |
#### File: property/migrations/0009_auto_20210209_1430.py
```python
import phonenumbers
from django.db import migrations
def phonenumber_format(apps, schema_editor):
    Flat = apps.get_model('property', 'Flat')
    for flat in Flat.objects.all():
        parsed_phone = phonenumbers.parse(flat.owners_phonenumber, 'RU')
        if phonenumbers.is_valid_number(parsed_phone):
            flat.owner_pure_phone = phonenumbers.format_number(parsed_phone, phonenumbers.PhoneNumberFormat.E164)
        else:
            flat.owner_pure_phone = ''
        flat.save()
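# Example of the conversion above (added for illustration): an owners_phonenumber stored as
# '8 (999) 123-45-67' would be written to owner_pure_phone as '+79991234567'.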
class Migration(migrations.Migration):
dependencies = [
('property', '0008_flat_owner_pure_phone'),
]
operations = [
migrations.RunPython(phonenumber_format)
]
```
#### File: property/migrations/0011_auto_20210210_1159.py
```python
from django.db import migrations
def load_objects(apps, schema_editor):
Flat = apps.get_model('property', 'Flat')
Owner = apps.get_model('property', 'Owner')
flats = Flat.objects.all()
for flat in flats:
Owner.objects.get_or_create(full_name=flat.owner,
phone_number=flat.owners_phonenumber,
pure_phone_number=flat.owner_pure_phone,
)
class Migration(migrations.Migration):
dependencies = [
('property', '0010_owner'),
]
operations = [
migrations.RunPython(load_objects)
]
``` |
{
"source": "A1-exe/bufferoverflow",
"score": 3
} |
#### File: A1-exe/bufferoverflow/fuzzer.py
```python
import sys
import socket
import argparse
import subprocess
import time
#####################
# CONNECTIONS #
#####################
HOST = 'localhost'
PORT = 1337
timeout = 5
#####################
# MISC #
#####################
# Payload modifiers
lengthsize = 100
length = 1
# Banner
paintBanner = True
#####################
# FUNCTIONS #
#####################
payload = ''
def generatePayload(sending):
return (sending * (length * lengthsize))
def main():
parser = argparse.ArgumentParser(description='A generic fuzzer.')
parser.add_argument('host', help='The target host')
parser.add_argument('port', help='The target port')
parser.add_argument('prefix', help='The fuzzing prefix')
parser.add_argument('-f', help='Fuzzing character', metavar='char', dest='sending', default='A')
parser.add_argument('-t', help='Timeout in seconds', metavar='number', dest='timeout', default=5)
parser.add_argument('-l', help='Send the prefix <number> times before the data', metavar='number', dest='preloop', default=0)
parser.add_argument('-d', help='Eat <number> responses between sending data', metavar='number', dest='digest', default=0)
parser.add_argument('--no-banner', help='Don\'t grab banner', dest='banner', action='store_false')
parser.set_defaults(banner=True)
# Globals
global payload
global timeout
global length
global lengthsize
args = parser.parse_args()
HOST = str(args.host)
PORT = abs(int(args.port))
timeout = abs(int(args.timeout))
prefix = args.prefix.encode('utf-8')
sending = args.sending.encode('utf-8')
preloop = abs(int(args.preloop))
digest = abs(int(args.digest))
grabBanner = args.banner
while True:
try: # Detect crash
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT)) # Establish connection
s.settimeout(timeout) # Set timeout
if (grabBanner):
banner = s.recv(1024) # Get the banner
if (length == 1): # Output banner
print(banner.decode('utf-8') if paintBanner else banner)
counter = 0
while True:
if (counter > (digest + preloop)):
break
if (counter < preloop):
s.sendall(prefix)
if (counter < digest):
s.recv(1024)
counter += 1
payload = generatePayload(sending)
print('[+] Sending %s bytes...' % len(payload))
s.sendall(prefix + payload + b'\n')
s.recv(1024)
print('[+] Done...')
length += 1
except Exception as e: # Handle crash
print('[-] Fuzzer crashed at %s bytes!' % len(payload))
print(e)
sys.exit(0) # Terminate program
time.sleep(1)
if __name__ == "__main__":
main()
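# Example invocation (added for illustration; host, port and prefix are placeholders):
#   python fuzzer.py 10.10.10.10 1337 "OVERFLOW1 " -f A -t 5
# Each iteration grows the payload by lengthsize (100) bytes and sends it after the prefix;
# when the service stops answering, the crashing length is reported.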
``` |
{
"source": "A1eXFei/StockMarket3",
"score": 3
} |
#### File: A1eXFei/StockMarket3/StockExporter.py
```python
import os
import logging
from logging.config import fileConfig
from util import DatabaseUtil as dbu
from util import ExportUtil as eu
fileConfig("logging_config.ini")
logger = logging.getLogger(__name__)
class StockExporter():
def __init__(self, export_dir, stock_code, start_date, end_date):
self.dir = export_dir
self.stock_code = stock_code
self.end_year = int(end_date[:4])
self.start_year = int(start_date[:4])
if self.end_year == self.start_year:
self.start_year = self.end_year - 1
self.sql_basic_data = "SELECT * FROM pdtb_stock_basic_data t WHERE t.CODE ='" + self.stock_code + "' "
self.sql_tech_data = "SELECT * FROM pdtb_stock_tech_data t WHERE t.CODE ='" + self.stock_code + "' "
def export(self):
for year in range(self.start_year, self.end_year):
logger.info("Export data for stock code " + self.stock_code + " for year " + str(year))
current_dir = self.dir + os.sep + self.stock_code + os.sep + str(year)
first_day = str(year) + "-01-01"
last_day = str(year) + "-12-31"
'''EXPORT BASIC DATA'''
basic_data_filename = "BASIC_" + self.stock_code + "_" + str(year) + ".csv"
sql_basic_data = self.sql_basic_data + "and t.DATE BETWEEN '" + first_day + "' AND '" + last_day + "'"
basic_data = dbu.get_pd_data(sql_basic_data)
if basic_data.shape[0] > 0:
if not os.path.exists(current_dir):
logger.debug("Make dir because there is no existing dir")
os.makedirs(current_dir)
eu.export(current_dir, basic_data_filename, basic_data)
logger.info("Basic data exported")
'''EXPORT TECH DATA'''
tech_data_filename = "TECH_" + self.stock_code + "_" + str(year) + ".csv"
sql_tech_data = self.sql_tech_data + "and t.DATE BETWEEN '" + first_day + "' AND '" + last_day + "'"
tech_data = dbu.get_pd_data(sql_tech_data)
if tech_data.shape[0] > 0:
if not os.path.exists(current_dir):
logger.debug("Make dir because there is no existing dir")
os.makedirs(current_dir)
eu.export(current_dir, tech_data_filename, tech_data)
logger.info("Tech data exported")
```
#### File: StockMarket3/tech/BRAR.py
```python
import numpy as np
from util import StockUtil as su
from tech import StockTechIndicator
class BRAR(StockTechIndicator):
def __init__(self):
StockTechIndicator.__init__(self)
def calculate(self, stock_code, date, time_period=26):
ar = 0.0
br = 0.0
data = su.get_basic_data(stock_code, date, time_period + 1).sort_index(ascending=False)
if data.shape[0] >= time_period + 1:
ar = round((data['HIGH'][1:] - data['OPEN'][1:]).sum() / (data['OPEN'][1:] - data['LOW'][1:]).sum() * 100, 3)
data['P_CLOSE'] = data['CLOSE'].shift(1)
data['BR_U'] = data['HIGH'][1:] - data['P_CLOSE'][1:]
data['BR_D'] = data['P_CLOSE'][1:] - data['LOW'][1:]
br = round(data[data['BR_U'] > 0]['BR_U'].sum() / data[data['BR_D'] > 0]['BR_D'].sum() *100, 3)
if np.isnan(ar) or np.isinf(ar) or np.isneginf(ar):
ar = 0.0
if np.isnan(br) or np.isinf(br) or np.isneginf(br):
br = 0.0
self.save_tech_data(stock_code, date, {'BRAR_BR': br,'BRAR_AR':ar})
return br, ar
if __name__ == "__main__":
b = BRAR()
print b.calculate('chbtc', 'btc_cny', '5min', 1497449100)
```
#### File: StockMarket3/tech/CCI.py
```python
import talib as ta
import numpy as np
from util import StockUtil as su
from tech import StockTechIndicator
class CCI(StockTechIndicator):
def __init__(self):
StockTechIndicator.__init__(self)
def calculate(self, stock_code, date, time_period=14):
cci = 0.0
data = su.get_basic_data(stock_code, date, time_period + 1).sort_index(ascending=False)
if data.shape[0] >= time_period + 1:
cci = round(ta.CCI(data['HIGH'].as_matrix(), data['LOW'].as_matrix(), data['CLOSE'].as_matrix(), time_period)[-1], 3)
if np.isnan(cci) or np.isinf(cci) or np.isneginf(cci):
cci = 0.0
self.save_tech_data(stock_code, date, {'CCI':cci})
return cci
if __name__ == "__main__":
b = CCI()
print b.calculate('chbtc', 'btc_cny', '5min', 1497449100)
```
#### File: StockMarket3/tech/MACD.py
```python
import pandas as pd
import numpy as np
from tech import StockTechIndicator
from util import StockUtil as su
class MACD(StockTechIndicator):
def __init__(self):
StockTechIndicator.__init__(self)
def calculate(self, stock_code, date, short=12, long=26, mid=9):
dif = 0.0
dea = 0.0
macd = 0.0
max_time_period = max(short, long, mid)
data = su.get_basic_data(stock_code, date, max_time_period*3).sort_index(ascending=False)
if data.shape[0] >= max_time_period*3:
close = data['CLOSE'].values
ewma_short = pd.ewma(close,span=short)
ewma_long = pd.ewma(close,span=long)
difs = (ewma_short-ewma_long)
deas = pd.ewma(difs,span=mid)
macds = (difs-deas)*2
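            # Note (added): pd.ewma is the legacy pandas API used here; on modern pandas the
            # equivalent would be pd.Series(close).ewm(span=...).mean().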
dif = round(difs[-1], 3)
dea = round(deas[-1], 3)
macd = round(macds[-1], 3)
if np.isnan(dif) or np.isinf(dif) or np.isneginf(dif):
dif = 0.0
if np.isnan(dea) or np.isinf(dea) or np.isneginf(dea):
dea = 0.0
if np.isnan(macd) or np.isinf(macd) or np.isneginf(macd):
macd = 0.0
self.save_tech_data(stock_code, date, {'MACD_DIF': dif, 'MACD_DEA':dea, 'MACD':macd})
return dif, dea, macd
if __name__ == "__main__":
b = MACD()
print b.calculate('chbtc', 'btc_cny', '5min', 1497449100)
```
#### File: StockMarket3/tech/MTM.py
```python
import numpy as np
from tech import StockTechIndicator
from util import StockUtil as su
class MTM(StockTechIndicator):
def __init__(self):
StockTechIndicator.__init__(self)
def calculate(self, stock_code, date, time_period1=12, time_period2=6):
max_time_period = max(time_period1, time_period2)
mtm = 0.0
mamtm = 0.0
data = su.get_basic_data(stock_code, date, max_time_period * 2).sort_index(ascending=False)
if data.shape[0] >= max_time_period * 2:
data['N_CLOSE'] = data['CLOSE'].shift(time_period1)
data['MTM'] = data['CLOSE'] - data['N_CLOSE']
mtm = round(data['MTM'].as_matrix()[-1], 3)
mamtm = round(data['MTM'].as_matrix()[-6:].sum() / float(time_period2), 3)
if np.isnan(mtm) or np.isinf(mtm) or np.isneginf(mtm):
mtm = 0.0
if np.isnan(mamtm) or np.isinf(mamtm) or np.isneginf(mamtm):
mamtm = 0.0
self.save_tech_data(stock_code, date, {'MTM': mtm, 'MAMTM': mamtm})
return mtm, mamtm
if __name__ == "__main__":
b = MTM()
print b.calculate('chbtc', 'btc_cny', '5min', 1497449100)
``` |
{
"source": "a1exsh/taupage",
"score": 2
} |
#### File: taupage/init.d/02-register-td-agent.py
```python
import logging
import subprocess
import re
import boto.utils
from jinja2 import Environment, FileSystemLoader
from taupage import get_config
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
TPL_NAME = 'td-agent.conf.jinja2'
TD_AGENT_TEMPLATE_PATH = '/etc/td-agent/templates/'
TD_AGENT_OUTPUT_PATH = '/etc/td-agent/td-agent.conf'
def restart_td_agent_process():
''' Restart Fluentd '''
process = subprocess.Popen(['service', 'td-agent', 'restart'])
exit_code = process.wait(timeout=5)
if exit_code:
raise Exception("'service td-agent restart' failed with exit code: {0}".format(exit_code))
def get_scalyr_api_key():
''' Read Scalyr API key from Taupage config and set in template file '''
main_config = get_config()
config = main_config.get('logging')
scalyr_api_key = config.get('scalyr_account_key', main_config.get('scalyr_account_key'))
if scalyr_api_key:
# If scalyr_api_key starts with "aws:kms:" then decrypt key
match_kms_key = re.search('aws:kms:', scalyr_api_key, re.IGNORECASE)
if match_kms_key:
scalyr_api_key = re.sub(r'aws:kms:', '', scalyr_api_key)
try:
scalyr_api_key = subprocess.check_output(['python3',
'/opt/taupage/bin/decrypt-kms.py',
scalyr_api_key]).decode('UTF-8').strip()
except Exception:
logger.error('Failed to run /opt/taupage/bin/decrypt-kms.py')
                raise SystemExit(1)
if scalyr_api_key == "Invalid KMS key.":
logger.error('Failed to decrypt KMS Key')
raise SystemExit(1)
return scalyr_api_key
def update_configuration_from_template(s3_default):
''' Update Jinja Template to create configuration file for Scalyr '''
fluentd_destinations = dict(scalyr=False, s3=False, rsyslog=False, scalyr_s3=False)
config = get_config()
logging_config = config.get('logging', {})
application_id = config.get('application_id')
application_version = config.get('application_version')
stack = config.get('notify_cfn', {}).get('stack')
source = config.get('source')
image = config.get('source').split(':', 1)[0]
instance_data = boto.utils.get_instance_identity()['document']
aws_region = instance_data['region']
aws_account = instance_data['accountId']
hostname = boto.utils.get_instance_metadata()['local-hostname'].split('.')[0]
customlog = config.get('mount_custom_log')
if config.get('rsyslog_aws_metadata'):
scalyr_syslog_log_parser = 'systemLogMetadata'
else:
scalyr_syslog_log_parser = 'systemLog'
scalyr_application_log_parser = logging_config.get('scalyr_application_log_parser', 'slf4j')
scalyr_custom_log_parser = logging_config.get('scalyr_custom_log_parser', 'slf4j')
fluentd_log_destination = logging_config.get('log_destination', 's3')
fluentd_syslog_destination = logging_config.get('syslog_destination', fluentd_log_destination)
fluentd_applog_destination = logging_config.get('applog_destination', fluentd_log_destination)
fluentd_authlog_destination = logging_config.get('authlog_destination', fluentd_log_destination)
fluentd_customlog_destination = logging_config.get('customlog_destination', fluentd_log_destination)
fluentd_applog_filter_exclude = logging_config.get('applog_filter_exclude', None)
fluentd_customlog_filter_exclude = logging_config.get('customlog_filter_exclude', None)
fluentd_loglevel = logging_config.get('fluentd_loglevel', 'error')
fluentd_s3_raw_log_format = logging_config.get('s3_raw_log_format', 'true')
fluentd_s3_region = logging_config.get('s3_region', aws_region)
fluentd_s3_bucket = logging_config.get('s3_bucket', 'zalando-logging-'+aws_account+'-'+aws_region)
fluentd_s3_timekey = logging_config.get('s3_timekey', '5m')
fluentd_s3_acl = logging_config.get('s3_acl', 'bucket-owner-full-control')
fluentd_rsyslog_host = logging_config.get('rsyslog_host')
fluentd_rsyslog_port = logging_config.get('rsyslog_port', '514')
fluentd_rsyslog_protocol = logging_config.get('rsyslog_protocol', 'tcp')
fluentd_rsyslog_severity = logging_config.get('rsyslog_severity', 'notice')
fluentd_rsyslog_program = logging_config.get('rsyslog_program', 'fluentd')
fluentd_rsyslog_hostname = logging_config.get('rsyslog_hostname', hostname)
for destination in (fluentd_applog_destination,
fluentd_authlog_destination,
fluentd_customlog_destination,
fluentd_syslog_destination):
fluentd_destinations[destination] = True
# Get Scalyr key only if configured
if fluentd_destinations.get('scalyr') or fluentd_destinations.get('scalyr_s3'):
scalyr_api_key = get_scalyr_api_key()
else:
scalyr_api_key = None
if fluentd_destinations.get('s3') or fluentd_destinations.get('scalyr_s3'):
try:
with open('/etc/cron.d/s3-iam-check', 'w') as file:
file.write('#!/bin/bash\n')
file.write('PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n')
file.write('*/5 * * * * root /opt/taupage/bin/s3-iam-check.py test {!s}\n'.format(fluentd_s3_bucket))
except Exception:
logger.exception('Failed to write file /etc/cron.d/s3-iam-check')
raise SystemExit(1)
env = Environment(loader=FileSystemLoader(TD_AGENT_TEMPLATE_PATH), trim_blocks=True)
template_data = env.get_template(TPL_NAME).render(
scalyr_api_key=scalyr_api_key,
application_id=application_id,
application_version=application_version,
stack=stack,
source=source,
image=image,
aws_region=aws_region,
aws_account=aws_account,
customlog=customlog,
scalyr_application_log_parser=scalyr_application_log_parser,
scalyr_syslog_log_parser=scalyr_syslog_log_parser,
scalyr_custom_log_parser=scalyr_custom_log_parser,
fluentd_syslog_destination=fluentd_syslog_destination,
fluentd_applog_destination=fluentd_applog_destination,
fluentd_applog_filter_exclude=fluentd_applog_filter_exclude,
fluentd_authlog_destination=fluentd_authlog_destination,
fluentd_customlog_destination=fluentd_customlog_destination,
fluentd_customlog_filter_exclude=fluentd_customlog_filter_exclude,
fluentd_loglevel=fluentd_loglevel,
fluentd_s3_raw_log_format=fluentd_s3_raw_log_format,
fluentd_s3_region=fluentd_s3_region,
fluentd_s3_bucket=fluentd_s3_bucket,
fluentd_s3_timekey=fluentd_s3_timekey,
fluentd_s3_acl=fluentd_s3_acl,
fluentd_rsyslog_host=fluentd_rsyslog_host,
fluentd_rsyslog_port=fluentd_rsyslog_port,
fluentd_rsyslog_protocol=fluentd_rsyslog_protocol,
fluentd_rsyslog_severity=fluentd_rsyslog_severity,
fluentd_rsyslog_program=fluentd_rsyslog_program,
fluentd_rsyslog_hostname=fluentd_rsyslog_hostname,
fluentd_destinations=fluentd_destinations
)
try:
with open(TD_AGENT_OUTPUT_PATH, 'w') as f:
f.write(template_data)
except Exception:
logger.exception('Failed to write file td-agent.conf')
raise SystemExit(1)
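# Illustrative Taupage user-data snippet (added as a sketch; the keys are the ones read by
# the lookups above, the values are hypothetical):
#
#   logging:
#     fluentd_enabled: true
#     log_destination: s3
#     s3_bucket: my-logging-bucket
#     scalyr_account_key: aws:kms:<encrypted-key>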
if __name__ == '__main__':
hostname = boto.utils.get_instance_metadata()['local-hostname'].split('.')[0]
config = get_config()
logging_config = config.get('logging')
s3_default = False
if logging_config:
if not logging_config.get('fluentd_enabled'):
logger.info('Fluentd disabled; skipping Fluentd initialization')
raise SystemExit()
if not logging_config:
        logger.info('Found no logging section in senza.yaml; enabling default logging to S3')
s3_default = True
try:
with open('/var/local/textfile_collector/fluentd_default_s3.prom', 'w') as file:
file.write('fluentd_default_s3_logging{{tag=\"td-agent\",hostname=\"{!s}\"}} 1.0\n'
.format(hostname))
except Exception:
logger.exception('Failed to write file /var/local/textfile_collector/fluentd_default_s3.prom')
raise SystemExit(1)
try:
with open('/etc/cron.d/get_fluentd_metrics', 'w') as file:
file.write('#!/bin/bash\n')
file.write('PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n')
file.write('* * * * * root /opt/taupage/bin/get-fluentd-metrics.sh\n')
except Exception:
logger.exception('Failed to write file /etc/cron.d/get_fluentd_metrics')
raise SystemExit(1)
update_configuration_from_template(s3_default)
restart_td_agent_process()
``` |
{
"source": "a1ext/DIE",
"score": 2
} |
#### File: DIE/UI/BPView.py
```python
from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget
from idaapi import PluginForm
from DIE.Lib import BpHandler
import DIE.UI.Die_Icons
class BreakpointView(PluginForm):
"""
DIE Value View
"""
def __init__(self):
super(BreakpointView, self).__init__()
self.bp_handler = None
self.bp_tree_widget = None
self.die_icons = None
def Show(self):
return PluginForm.Show(self,
"Breakpoint View",
options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
"""
Called when the view is created
"""
self.bp_tree_widget = QtWidgets.QTreeWidget()
self.bp_handler = BpHandler.get_bp_handler()
self.die_icons = DIE.UI.Die_Icons.get_die_icons()
# Get parent widget
self.parent = form_to_widget(form)
self._add_parser_data()
toolbar = QtWidgets.QToolBar()
action_refresh = QtWidgets.QAction(self.die_icons.icon_refresh, "Refresh", toolbar)
action_refresh.triggered.connect(self.refresh)
toolbar.addAction(action_refresh)
layout = QtWidgets.QGridLayout()
layout.addWidget(toolbar)
layout.addWidget(self.bp_tree_widget)
self.parent.setLayout(layout)
def refresh(self):
"""
Reload the view with current values
"""
self._add_parser_data()
def _add_parser_data(self):
"""
Add data to the breakpoint widget model
"""
if self.bp_tree_widget is not None:
self.bp_tree_widget.clear()
else:
self.bp_tree_widget = QtWidgets.QTreeWidget()
root_item = self.bp_tree_widget.invisibleRootItem()
self.bp_tree_widget.setHeaderLabel("Breakpoints")
# Excluded Modules
module_item = QtWidgets.QTreeWidgetItem()
module_item.setText(0, "Excluded Modules")
module_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for module in self.bp_handler.excluded_modules:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, module)
module_item.insertChild(row, current_row_item)
row += 1
# Excluded Functions
function_item = QtWidgets.QTreeWidgetItem()
function_item.setText(0, "Excluded Functions")
function_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for function in self.bp_handler.excluded_funcNames:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, function)
function_item.insertChild(row, current_row_item)
row += 1
# Excluded Addresses
ea_item = QtWidgets.QTreeWidgetItem()
ea_item.setText(0, "Excluded Addresses")
ea_item.setFlags(QtCore.Qt.ItemIsEnabled)
row = 0
for ea in self.bp_handler.excluded_bp_ea:
current_row_item = QtWidgets.QTreeWidgetItem()
current_row_item.setFlags(QtCore.Qt.ItemIsEnabled)
current_row_item.setText(0, hex(ea))
ea_item.insertChild(row, current_row_item)
row += 1
current_row = 0
if module_item.childCount() > 0:
root_item.insertChild(current_row, module_item)
current_row += 1
if function_item.childCount() > 0:
root_item.insertChild(current_row, function_item)
current_row += 1
if ea_item.childCount() > 0:
root_item.insertChild(current_row, ea_item)
current_row += 1
_bp_view = None
def get_view():
return _bp_view
def initialize():
global _bp_view
_bp_view = BreakpointView()
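# Illustrative usage from the rest of the plugin (added as a sketch; the actual call sites
# are outside this file):
#
#   DIE.UI.BPView.initialize()
#   DIE.UI.BPView.get_view().Show()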
```
#### File: DIE/UI/FunctionViewEx.py
```python
import networkx as nx
from awesome.context import ignored
import sark
import idaapi
import idautils
import idc
from idaapi import PluginForm
from sark.qt import QtGui, QtCore, QtWidgets, form_to_widget, use_qt5
if use_qt5:
_QSortFilterProxyModel = QtCore.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchRecursive
_MatchExactly = QtCore.Qt.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.PositionAtTop
else:
_QSortFilterProxyModel = QtGui.QSortFilterProxyModel
_MatchRecursive = QtCore.Qt.MatchFlag.MatchRecursive
_MatchExactly = QtCore.Qt.MatchFlag.MatchExactly
_PositionAtTop = QtWidgets.QAbstractItemView.ScrollHint.PositionAtTop
import DIE.UI.Die_Icons
import DIE.UI.ValueViewEx
import DIE.UI.ParserView
import DIE.UI.BPView
import DIE.Lib.IDAConnector
import DIE.Lib.DIEDb
import DIE.Lib.BpHandler
import sark.ui
class FunctionView(PluginForm):
"""
DIE Function View
"""
def __init__(self):
super(FunctionView, self).__init__()
self.value_view = None
self.bp_handler = None
self.die_icons = None
self.die_db = None
self.highligthed_items = []
def Show(self):
# Reset highlighted items
self.highligthed_items = []
return PluginForm.Show(self,
"Function View",
options=PluginForm.FORM_PERSIST)
def OnCreate(self, form):
"""
Called when the plugin form is created
"""
self.value_view = DIE.UI.ValueViewEx.get_view()
self.bp_handler = DIE.Lib.BpHandler.get_bp_handler()
self.die_icons = DIE.UI.Die_Icons.get_die_icons()
self.die_db = DIE.Lib.DIEDb.get_db()
# Get parent widget
self.parent = form_to_widget(form)
self.functionModel = QtGui.QStandardItemModel()
self.functionTreeView = QtWidgets.QTreeView()
self.functionTreeView.setExpandsOnDoubleClick(False)
#self.functionTreeView.setSortingEnabled(True)
delegate = TreeViewDelegate(self.functionTreeView)
self.functionTreeView.setItemDelegate(delegate)
self.functionTreeView.doubleClicked.connect(self.itemDoubleClickSlot)
self._model_builder(self.functionModel)
self.functionTreeView.setModel(self.functionModel)
self.functionTreeView.setColumnWidth(0, 200)
self.functionTreeView.setColumnWidth(1, 20)
self.functionTreeView.setColumnWidth(2, 20)
self.functionTreeView.setColumnWidth(3, 20)
self.functionTreeView.setColumnWidth(4, 250)
self.functionTreeView.setColumnWidth(5, 100)
self.functionTreeView.setColumnWidth(6, 20)
self.functionTreeView.setColumnWidth(7, 450)
self.functionTreeView.setColumnWidth(8, 20)
self.functionTreeView.setColumnWidth(9, 450)
# Context menus
self.functionTreeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.functionTreeView.customContextMenuRequested.connect(self.onCustomContextMenu)
# Actions
self.context_menu_param = None # Parameter to be passed to context menu slots
action_exclude_func = QtWidgets.QAction("Exclude Function", self.functionTreeView, triggered=lambda: self.on_exclude_func(self.context_menu_param))
action_exclude_func_adrs = QtWidgets.QAction("Exclude All Function Calls", self.functionTreeView, triggered=lambda: self.on_exclude_func_adrs(self.context_menu_param))
action_exclude_ea = QtWidgets.QAction("Exclude Address", self.functionTreeView, triggered=lambda: self.on_exclude_ea(self.context_menu_param))
action_exclude_library = QtWidgets.QAction("Exclude Library", self.functionTreeView, triggered=lambda: self.on_exclude_library(self.context_menu_param))
action_value_detail = QtWidgets.QAction("Inspect Value Details", self.functionTreeView, triggered=lambda: self.on_value_detail(self.context_menu_param))
action_show_callgraph = QtWidgets.QAction("Show Call-Graph", self.functionTreeView, triggered=lambda: self.on_show_callgraph(self.context_menu_param))
# Function ContextMenu
self.function_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.function_context_menu.addAction(action_exclude_func)
self.function_context_menu.addAction(action_exclude_library)
self.function_context_menu.addAction(action_exclude_func_adrs)
# Function ea ContextMenu
self.ea_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.ea_context_menu.addAction(action_exclude_ea)
self.ea_context_menu.addAction(action_show_callgraph)
# Argument value ContextMenu
self.value_context_menu = QtWidgets.QMenu(self.functionTreeView)
self.value_context_menu.addAction(action_value_detail)
        # Thread ComboBox
threads = []
if self.die_db is not None:
threads = self.die_db.get_thread_list()
thread_id_list = []
thread_id_list.append("All Threads")
for thread in threads:
thread_id_list.append(str(thread.thread_num))
self.thread_id_combo = QtWidgets.QComboBox()
self.thread_id_combo.addItems(thread_id_list)
self.thread_id_combo.activated[str].connect(self.on_thread_combobox_change)
self.thread_id_label = QtWidgets.QLabel("Thread: ")
# Toolbar
self.function_toolbar = QtWidgets.QToolBar()
self.function_toolbar.addWidget(self.thread_id_label)
self.function_toolbar.addWidget(self.thread_id_combo)
# Grid
layout = QtWidgets.QGridLayout()
layout.addWidget(self.function_toolbar)
layout.addWidget(self.functionTreeView)
self.parent.setLayout(layout)
def OnClose(self, form):
idaapi.msg("Closed\n")
def isVisible(self):
"""
Is functionview visible
@return: True if visible, otherwise False
"""
try:
return self.functionTreeView.isVisible()
except:
return False
def _model_builder(self, model):
"""
Build the function model.
@param model: QStandardItemModel object
"""
model.clear() # Clear the model
root_node = model.invisibleRootItem()
self._make_model_headers(model)
if self.die_db is None:
return
# Add db functions to the model
for function in self.die_db.get_functions():
item_list_func = self._make_function_item(function)
if function.is_lib_func: # Color library function
for tmp_item in item_list_func:
tmp_item.setBackground(QtGui.QColor(184, 223, 220))
item_function = item_list_func[0]
root_node.appendRow(item_list_func)
# Add function contexts ea\occurrences for the current function
func_context_dict = self.die_db.get_function_context_dict(function)
for function_context_ea in func_context_dict:
function_context_list = func_context_dict[function_context_ea]
if not len(function_context_list) > 0:
continue
item_func_context_list = self._make_function_ea_item(function_context_list[0])
item_func_context_ea = item_func_context_list[0]
item_function.appendRow(item_func_context_list)
occurrence_num = 0
for function_context in function_context_list:
item_func_context_list = self._make_func_occur_item(function_context, occurrence_num)
item_func_context = item_func_context_list[0]
item_func_context_ea.appendRow(item_func_context_list)
self._insert_thread_data(item_function, function_context.thread_id)
self._insert_thread_data(item_func_context_ea, function_context.thread_id)
# Add function arguments to each context
current_call_values = self.die_db.get_call_values(function_context)
current_ret_values = self.die_db.get_return_values(function_context)
curret_ret_arg_value = self.die_db.get_return_arg_value(function_context)
for arg_index in xrange(0, function.arg_num):
try:
current_arg = self.die_db.get_function_arg(function, arg_index)
self._add_model_arg_value(item_func_context,
current_call_values[arg_index],
current_ret_values[arg_index],
current_arg.name,
current_arg.type)
except IndexError:
break
ret_arg = self.die_db.get_function_arg(function, -1)
if ret_arg is None:
ret_arg_type = "VOID"
else:
ret_arg_type = ret_arg.type
# Add return argument
self._add_model_arg_value(item_func_context,
None,
curret_ret_arg_value,
"ret_arg",
ret_arg_type)
# Increment occurrence counter
occurrence_num += 1
# Add non-executed function to the model
# for func_ea in idautils.Functions():
# func_name = DIE.Lib.IDAConnector.get_function_name(func_ea)
#
# if self.die_db.get_function_by_name(func_name) is None:
# item_list_func = self._make_nonexec_function_time(func_name)
#
# if function.is_lib_func: # Color library function
# for tmp_item in item_list_func:
# tmp_item.setBackground(QtGui.QColor(255, 0, 0, 127))
#
# root_node.appendRow(item_list_func)
def _make_model_headers(self, model):
"""
Set the model horizontal header data
@param model: the QStandardItemModel which headers should be set
"""
### Function Header
item_header = QtGui.QStandardItem("Function")
item_header.setToolTip("Function Name")
model.setHorizontalHeaderItem(0, item_header)
### Call number header
item_header = QtGui.QStandardItem("#")
item_header.setToolTip("Number of calls preformed to this function")
model.setHorizontalHeaderItem(1, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("I")
item_header.setToolTip("Indirect Call")
model.setHorizontalHeaderItem(2, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("N")
item_header.setToolTip("New Function")
model.setHorizontalHeaderItem(3, item_header)
### Indirect Header
item_header = QtGui.QStandardItem("Type")
item_header.setToolTip("Argument Type")
model.setHorizontalHeaderItem(4, item_header)
### New Function Header
item_header = QtGui.QStandardItem("Name")
item_header.setToolTip("Argument Name")
model.setHorizontalHeaderItem(5, item_header)
### Call Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(6, item_header)
### Call Value Header
item_header = QtGui.QStandardItem("Call Value")
item_header.setToolTip("Argument`s value on function call")
model.setHorizontalHeaderItem(7, item_header)
### Return Value Icon Header
item_header = QtGui.QStandardItem("")
model.setHorizontalHeaderItem(8, item_header)
### Return Value Header
item_header = QtGui.QStandardItem("Return Value")
item_header.setToolTip("Argument`s value on function return")
model.setHorizontalHeaderItem(9, item_header)
def _make_thread_id_data(self, thread_id):
"""
        Delimit thread_id data in order to support filtering/sorting on multi-thread data items
        @param thread_id: thread id to normalize
        @return: a normalized string of the thread_id to be used as data for ThreadId_Role
"""
return "t%st" % str(thread_id)
def _insert_thread_data(self, item, thread_id):
"""
Insert thread_id data into a model item.
        The value found in the thread_id argument will be delimited by the _make_thread_id_data function
        (e.g. thread_id 123 will become 't123t').
        The delimited value will then be appended to a string of concatenated (unique) child-item thread-ids
        (for example, an item data value can be "t123tt5672tt11112t" for threads 123, 5672 and 11112)
@param item: the model item to add the data to
@param thread_id: thread_id number
@return: True if thread data was successfully added to item, otherwise False
"""
try:
current_thread_id = self._make_thread_id_data(thread_id)
thread_data = item.data(role=DIE.UI.ThreadId_Role)
if thread_data is None:
item.setData(current_thread_id, role=DIE.UI.ThreadId_Role)
elif not current_thread_id in thread_data:
item.setData(thread_data + current_thread_id, role=DIE.UI.ThreadId_Role)
return True
except Exception as ex:
idaapi.msg("Error while inserting thread data: %s\n" %ex)
return False
def _make_function_item(self, function):
"""
Build a tree item for a function name (level-0)
@param function: dbFunction object
        @return: QStandardItemModel item for the function
"""
function_txt = "%s" % function.function_name
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_txt)
item_function.setData(function, role=DIE.UI.Function_Role)
function_count = self.die_db.count_function_occurs(function)
item_function_count = QtGui.QStandardItem(str(function_count))
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function,
item_function_count,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_nonexec_function_time(self, function_name):
"""
Build a tree item for a function name (for a non-executed function)
@type: String
@param function_name: Function name
@return:
"""
item_function = QtGui.QStandardItem(self.die_icons.icon_function, function_name)
item_function_count = QtGui.QStandardItem("0")
item_function_count.setEditable(False)
item_function.setEditable(False)
item_list = [item_function, item_function_count]
return item_list
def _make_function_ea_item(self, function_context):
"""
Build a tree item for a function_ea node (level-1)
@param function_context: a dbFunction_Context object
        @return: QStandardItemModel item for the function context
"""
calling_function_start = None
with ignored(sark.exceptions.SarkNoFunction):
calling_function_start = sark.Function(function_context.calling_ea).startEA
if calling_function_start is not None:
call_offset = function_context.calling_ea - calling_function_start
func_ea_txt = "%s+%s" % (function_context.calling_func_name, hex(call_offset))
else:
func_ea_txt = "[%s]:%s" % (function_context.calling_func_name, hex(function_context.calling_ea))
item_func_context_ea = QtGui.QStandardItem(func_ea_txt)
item_func_context_ea.setEditable(False)
item_func_context_ea.setData(hex(function_context.calling_ea), role=QtCore.Qt.ToolTipRole)
item_func_context_ea.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context_ea.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_is_indirect = QtGui.QStandardItem()
item_func_is_indirect.setEditable(False)
if function_context.is_indirect:
item_func_is_indirect.setIcon(self.die_icons.icon_v)
item_func_is_new = QtGui.QStandardItem()
item_func_is_new.setEditable(False)
if function_context.is_new_func:
item_func_is_new.setIcon(self.die_icons.icon_v)
item_list = [item_func_context_ea,
QtGui.QStandardItem(),
item_func_is_indirect,
item_func_is_new,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _make_func_occur_item(self, function_context, occur_num):
"""
Build a tree item for function occurrence (level-2)
@param function_context: a dbFunction_Context object
@param occur_num: occurrence number
        @return: QStandardItemModel item for the function occurrence
"""
func_occur_txt = "Occur %s" % str(occur_num)
item_func_context = QtGui.QStandardItem(func_occur_txt)
item_func_context.setColumnCount(5)
item_func_context.setEditable(False)
item_func_context.setData(function_context, role=DIE.UI.FunctionContext_Role)
item_func_context.setData(id(function_context), role=DIE.UI.ContextId_Role) # Used for module look-ups
item_func_context.setData(self._make_thread_id_data(function_context.thread_id), role=DIE.UI.ThreadId_Role)
item_list = [item_func_context,
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem(),
QtGui.QStandardItem()]
return item_list
def _add_model_arg_value(self, parent, call_value, ret_value, arg_name, arg_type, nest_depth=0):
"""
Add a debug value
@param parent:
@param call_value:
@param ret_value:
@param arg_name:
@param arg_type:
@return:
"""
arg_count = parent.rowCount()
this_row_item = QtGui.QStandardItem("")
this_row_item.setData(parent.data(role=DIE.UI.ThreadId_Role), role=DIE.UI.ThreadId_Role) # Inherit thread data from parent
# Set indentation for argument types (for nested values)
arg_ident = " " * nest_depth
arg_ident_type = arg_ident + arg_type
item_parsed_val_flag_call = QtGui.QStandardItem()
item_parsed_val_call = QtGui.QStandardItem()
item_parsed_val_flag_ret = QtGui.QStandardItem()
item_parsed_val_ret = QtGui.QStandardItem()
# Get Call Value
if call_value is not None:
parsed_vals = self.die_db.get_parsed_values(call_value)
this_row_item.setData(parsed_vals, role=DIE.UI.CallValue_Role)
if parsed_vals is not None and len(parsed_vals) > 0:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_call = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_call.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1: # If more than 1 item, show a combo-box
item_parsed_val_call.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_call.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_call.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if call_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if call_value.raw_value is not None:
parsed_val_data = hex(call_value.raw_value)
if len(call_value.nested_values) > 0 or call_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_call = QtGui.QStandardItem(parsed_val_data)
# Get return value
if ret_value is not None:
parsed_vals = self.die_db.get_parsed_values(ret_value)
this_row_item.setData(parsed_vals, role=DIE.UI.RetValue_Role)
# If len(parsed_vals)>1 create a combobox delegate.
if parsed_vals:
is_guessed, best_val = self.die_db.get_best_parsed_val(parsed_vals)
item_parsed_val_ret = QtGui.QStandardItem(best_val.data)
if is_guessed:
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_question)
                if len(parsed_vals) > 1: # If more than 1 item, show a combo-box
item_parsed_val_ret.setData(parsed_vals, role=DIE.UI.ParsedValuesRole)
item_parsed_val_flag_ret.setIcon(self.die_icons.icon_more)
else:
item_parsed_val_ret.setData(parsed_vals[0], role=DIE.UI.ParsedValueRole)
else:
parsed_val_data = "NULL"
if ret_value.derref_depth == 0:
parsed_val_data = "!MAX_DEREF!"
if ret_value.raw_value is not None:
parsed_val_data = hex(ret_value.raw_value)
if ret_value.nested_values or ret_value.reference_flink is not None:
parsed_val_data = ""
item_parsed_val_ret = QtGui.QStandardItem(parsed_val_data)
parent.setChild(arg_count, 0, this_row_item)
parent.setChild(arg_count, 1, QtGui.QStandardItem())
parent.setChild(arg_count, 2, QtGui.QStandardItem())
parent.setChild(arg_count, 3, QtGui.QStandardItem())
parent.setChild(arg_count, 4, QtGui.QStandardItem(arg_ident_type))
parent.setChild(arg_count, 5, QtGui.QStandardItem(arg_name))
parent.setChild(arg_count, 6, item_parsed_val_flag_call)
parent.setChild(arg_count, 7, item_parsed_val_call)
parent.setChild(arg_count, 8, item_parsed_val_flag_ret)
parent.setChild(arg_count, 9, item_parsed_val_ret)
# If current object contains reference values, add them to the module
self._add_model_arg_ref(this_row_item, call_value, ret_value, nest_depth)
# If current object is a container object, Add its members to the module
self._add_model_container_members(this_row_item, call_value, ret_value, nest_depth)
def _add_model_arg_ref(self, parent, call_value, ret_value, nest_depth=0):
"""
Add a reference value to module
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call debug value is a reference
if call_value is not None:
if call_value.reference_flink is not None and not call_value.is_definitely_parsed:
ref_val_call = self.die_db.get_dbg_value(call_value.reference_flink)
ref_val_ret = None
# Try to get the same reference from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val_ret = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, ref_val_call, ref_val_ret, ref_val_call.name, ref_val_call.type, nest_depth+1)
# If return debug value is a reference (and call value is not)
elif ret_value is not None:
if ret_value.reference_flink is not None and not ret_value.is_definitely_parsed:
ref_val = self.die_db.get_dbg_value(ret_value.reference_flink)
self._add_model_arg_value(parent, None, ref_val, ref_val.name, ref_val.type, nest_depth+1)
def _add_model_container_members(self, parent, call_value, ret_value, nest_depth=0):
"""
Add container members to module
@param parent:
@param call_value:
@param ret_value:
@param nest_depth:
@return:
"""
# If call value is a container type (struct\union\etc)
if call_value is not None and call_value.nested_values is not None:
if call_value.nested_values:
for index in xrange(0, len(call_value.nested_values)):
nested_val_call = self.die_db.get_dbg_value(call_value.nested_values[index])
nested_val_ret = None
# Try to get the same member from the return debug value.
if ret_value is not None and ret_value.type == call_value.type:
if ret_value.nested_values is not None:
if ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(ret_value.nested_values[index])
self._add_model_arg_value(parent, nested_val_call, nested_val_ret, nested_val_call.name, nested_val_call.type, nest_depth+1)
# If return value is a container type (and call value is not)
elif ret_value is not None:
if ret_value.nested_values is not None:
if ret_value.nested_values:
for nested_value in ret_value.nested_values:
nested_val_ret = self.die_db.get_dbg_value(nested_value)
self._add_model_arg_value(parent,
None,
nested_val_ret,
nested_val_ret.name,
nested_val_ret.type,
nest_depth+1)
def reset_function_count(self, thread_id=None):
"""
Reset the function count and set the count according to currently selected thread_id
@param thread_id: currently selected thread_id
"""
root_item = self.functionModel.item(0, 0)
rows = root_item.rowCount()
thread_id = self.thread_id_combo.currentText()
for row in xrange(0, rows):
cur_item = root_item.child(row, 0)
function = cur_item.data(role=DIE.UI.Function_Role)
if function is not None:
count = 0
if thread_id is None:
count = self.die_db.count_function_occurs(function)
else:
count = self.die_db.count_function_occurs(function, int(thread_id))
func_count_item = root_item.child(row, 1)
func_count_item.setText(str(count))
###############################################################################################
# Highlight Items.
def highlight_item(self, item):
"""
Highlight a single item
@param item: module item
"""
try:
item.setBackground(QtGui.QColor('yellow'))
cur_font = item.font()
cur_font.setBold(True)
item.setFont(cur_font)
except Exception as ex:
idaapi.msg("Error while highlighting item: %s\n" %ex)
def highlight_item_row(self, item):
"""
highlight the entire row containing a table item
@param item: table item
"""
try:
if not item.index().isValid():
return
parent = item.parent()
if parent is None:
parent = item
if not parent.hasChildren():
self.highlight_item(parent)
return
row = item.row()
column_num = parent.columnCount()
for column in xrange(0, column_num):
if self.functionModel.hasIndex(row, column, parent.index()):
cur_index = self.functionModel.index(row, column, parent.index())
self.highlight_item(self.functionModel.itemFromIndex(cur_index))
persistent_index = QtCore.QPersistentModelIndex(cur_index)
self.highligthed_items.append(persistent_index)
except Exception as ex:
idaapi.msg("Error while highlighting item row: %s\n" % ex)
def clear_highlights(self):
"""
Clear all highlighted items
@return:
"""
try:
self.functionTreeView.collapseAll()
for persistent_index in self.highligthed_items:
if persistent_index.isValid():
item = self.functionModel.itemFromIndex(persistent_index)
item.setBackground(QtGui.QColor('white'))
cur_font = item.font()
cur_font.setBold(False)
item.setFont(cur_font)
self.highligthed_items = []
except Exception as ex:
idaapi.msg("Error while clearing highlights: %s\n" % ex)
###############################################################################################
# Find Items.
def find_function(self, function_name):
"""
Find and highlight a function in current module
@param function_name: Function name
"""
self.clear_highlights()
matched_items = self.functionModel.findItems(function_name)
for item in matched_items:
self.functionTreeView.expand(item.index())
self.functionTreeView.scrollTo(item.index(), _PositionAtTop)
self.highlight_item_row(item)
def find_context_list(self, context_list):
"""
Find and highlight a list of function contexts
@param context_list: list of function contexts (of type dbFunction_Context)
"""
try:
self.clear_highlights()
root_index = self.functionModel.index(0, 0)
if not root_index.isValid():
return
for func_context in context_list:
context_id = id(func_context)
matched_items = self.functionModel.match(root_index, DIE.UI.ContextId_Role, context_id, -1, _MatchRecursive | _MatchExactly)
for index in matched_items:
if not index.isValid():
continue
# Do not highlight "ea root" items, only occurrences of it.
if not index.data().startswith("Occur"):
continue
item = self.functionModel.itemFromIndex(index)
self.functionTreeView.expand(index)
self.functionTreeView.scrollTo(index, _PositionAtTop)
self.highlight_item_row(item)
return True
except Exception as ex:
idaapi.msg("Error while looking up function context in FunctionView: %s\n" % ex)
return False
###############################################################################################
# Slots.
# @QtCore.Slot(QtCore.QModelIndex)
def itemDoubleClickSlot(self, index):
"""
TreeView DoubleClicked Slot.
@param index: QModelIndex object of the clicked tree index item.
@return:
"""
function = index.data(role=DIE.UI.Function_Role)
if function is not None:
ea = function.function_start
if function.is_lib_func:
ea = function.proto_ea
            if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
func_context = index.data(role=DIE.UI.FunctionContext_Role)
if func_context is not None:
ea = func_context.calling_ea
            if ea is not None and ea != idc.BADADDR:
idc.Jump(ea)
return True
# @QtCore.Slot(QtCore.QPoint)
def onCustomContextMenu(self, point):
index = self.functionTreeView.indexAt(point)
is_function_item = index.data(role=DIE.UI.Function_Role)
is_func_context_item = index.data(role=DIE.UI.FunctionContext_Role)
is_value_item = index.data(role=DIE.UI.ParsedValueRole)
if is_function_item is not None:
self.context_menu_param = is_function_item
self.function_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_func_context_item is not None:
self.context_menu_param = is_func_context_item
self.ea_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
if is_value_item is not None:
self.context_menu_param = is_value_item
self.value_context_menu.exec_(self.functionTreeView.mapToGlobal(point))
# @QtCore.Slot(str)
def on_exclude_func(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
self.bp_handler.add_bp_funcname_exception(function.function_name)
return
# @QtCore.Slot(str)
def on_exclude_func_adrs(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
func_context_list = self.die_db.get_function_context_list(function)
for func_context in func_context_list:
self.bp_handler.add_bp_ea_exception(func_context.calling_ea)
return
# @QtCore.Slot(str)
def on_exclude_ea(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_exclude_ea': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_ea'")
self.bp_handler.add_bp_ea_exception(function_context.calling_ea)
return
# @QtCore.Slot(str)
def on_show_callgraph(self, function_context):
if not isinstance(function_context, DIE.Lib.DIEDb.dbFunction_Context):
if function_context is not None:
raise ValueError("Wrong value sent to 'on_show_callgraph': %s. excpected dbFunction_Context" % function_context.__class__)
else:
raise ValueError("Wrong value sent to 'on_show_callgraph'")
graph = nx.DiGraph()
call_graph = self.die_db.get_call_graph_to(function_context)
if not call_graph:
idaapi.msg("No Execution Graph")
return
for ctxt_node in call_graph:
(from_address, to_address) = ctxt_node
graph.add_edge(from_address, to_address)
function_name = self.die_db.get_function_name(function_context.function)
viewer = sark.ui.NXGraph(graph, "Callgraph for {}".format(function_name), handler=sark.ui.AddressNodeHandler())
viewer.Show()
return
# @QtCore.Slot(str)
def on_exclude_library(self, function):
if not isinstance(function, DIE.Lib.DIEDb.dbFunction):
if function is not None:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs': %s. excpected dbFunction_Context" % function.__class__)
else:
raise ValueError("Wrong value sent to 'on_exclude_func_adrs'")
if function.is_lib_func and function.lib_name is not None:
self.bp_handler.add_module_exception(function.lib_name)
return
# @QtCore.Slot(str)
def on_value_detail(self, value):
if not self.value_view.isVisible():
self.value_view.Show()
self.value_view.find_value(value)
return
def on_thread_combobox_change(self, thread_id):
self.reset_function_count(thread_id) # reset function count according to currently selected thread
if thread_id == "All Threads":
            if self.functionTreeView.model() is not self.functionModel:
self.functionTreeView.setModel(self.functionModel)
return
hidden_threads = ".*" + self._make_thread_id_data(thread_id) + ".*"
threadProxyModel = _QSortFilterProxyModel()
threadProxyModel.setFilterRole(DIE.UI.ThreadId_Role)
threadProxyModel.setFilterRegExp(hidden_threads)
threadProxyModel.setSourceModel(self.functionModel)
self.functionTreeView.setModel(threadProxyModel)
def on_valueview_button(self):
value_view = DIE.UI.ValueViewEx.get_view()
value_view.Show()
def on_pluginsview_button(self):
plugins_view = DIE.UI.ParserView.get_view()
plugins_view.Show()
def on_bpview_button(self):
bp_view = DIE.UI.BPView.get_view()
bp_view.Show()
###############################################################################################
# View Delegates.
class TreeViewDelegate(QtWidgets.QStyledItemDelegate):
"""
Delegate for parsed value viewing in the tree view
"""
def __init__(self, parent):
QtWidgets.QStyledItemDelegate.__init__(self, parent)
self.parent = parent
def createEditor(self, parent, option, index):
parsed_val_list = index.data(role=DIE.UI.ParsedValuesRole)
        # Show combobox only if parsed_value has two or more items.
if parsed_val_list is not None and len(parsed_val_list) > 1:
lines = []
for parsed_val in parsed_val_list:
line_txt = "%d, %s, %s" % (parsed_val.score, parsed_val.data, parsed_val.description)
lines.append(line_txt)
combo_box = QtWidgets.QComboBox(parent)
combo_box.addItems(lines)
return combo_box
def setEditorData(self, editor, index):
editor.blockSignals(True)
editor.setCurrentIndex(int(index.model().data(index)))
editor.blockSignals(False)
# Singleton
function_view = None
def initialize():
global function_view
function_view = FunctionView()
def get_view():
return function_view
``` |
{
"source": "A1exTrask/Python3_AutoClicker",
"score": 4
} |
#### File: A1exTrask/Python3_AutoClicker/Click counter.py
```python
from tkinter import *
root = Tk()
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
root.wm_geometry("+%d+%d" % (x, y))
root.title('Click counter')
root.geometry('180x130')
root.resizable(width=False, height=False)
root.attributes('-topmost', 1)
count = 0
def clicked():
global count
count += 1
Click.configure(text=count)
Click = Label(root, text='0', font='Arial 35')
Click.pack()
btn = Button(root, text='Click on me', padx='20', pady='20', cursor="hand2", command=clicked)
btn.pack()
root.mainloop()
```
#### File: A1exTrask/Python3_AutoClicker/idrz.py
```python
from tkinter import *
import keyboard
import pyautogui
isRun = [False]
def callback():
if isRun[0]:
isRun[0] = False
print("off")
else:
isRun[0] = True
print("on")
tick()
def tick():
if not isRun[0]:
return
pyautogui.click(button='left')
root.after(1000, tick)
root = Tk()
keyboard.add_hotkey("f8", callback)
b = Button(root, text="OK", command=callback)
b.pack()
tick()
root.mainloop()
``` |
{
"source": "a1exwang/acoustic_language",
"score": 3
} |
#### File: acoustic_language/nn/addMNISTrnn.py
```python
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
#from keras.initializations import norRemal, identity
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from keras.optimizers import RMSprop, Adadelta
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten
from keras.layers.wrappers import TimeDistributed
from keras.models import model_from_json
#import json
def create_model(maxToAdd, size):
model = Sequential()
model.add(TimeDistributed(Convolution2D(8, 4, 1, border_mode='valid'), input_shape=(maxToAdd,1,size*size,1)))
model.add(Activation('relu'))
model.add(TimeDistributed(Convolution2D(16, 3, 1, border_mode='valid')))
model.add(Activation('relu'))
model.add(Reshape((maxToAdd,np.prod(model.output_shape[-3:]))))
model.add(TimeDistributed(Flatten()))
model.add(Activation('relu'))
model.add(GRU(output_dim=100,return_sequences=True))
model.add(GRU(output_dim=50,return_sequences=False))
model.add(Dropout(.2))
model.add(Dense(1))
rmsprop = RMSprop()
model.compile(loss='mean_squared_error', optimizer=rmsprop)
return model
def main():
# for reproducibility
np.random.seed(2016)
#define some run parameters
batch_size = 32
nb_epochs = 20
examplesPer = 60000
maxToAdd = 8
hidden_units = 200
size = 28
#cutoff = 1000
model = create_model(maxToAdd=maxToAdd, size=size)
# the data, shuffled and split between train and test sets
(X_train_raw, y_train_temp), (X_test_raw, y_test_temp) = mnist.load_data()
#ignore "cutoff" section in full run
#X_train_raw = X_train_raw[:cutoff]
#X_test_raw = X_test_raw[:cutoff]
#y_train_temp = y_train_temp[:cutoff]
#y_test_temp = y_test_temp[:cutoff]
#basic image processing
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255
print('X_train_raw shape:', X_train_raw.shape)
print(X_train_raw.shape[0], 'train samples')
print(X_test_raw.shape[0], 'test samples')
print("Building model")
#define our time-distributed setup
for ep in range(0,nb_epochs):
X_train = []
y_train = []
X_test = []
y_test = []
X_train = np.zeros((examplesPer,maxToAdd,1,size*size,1))
for i in range(0,examplesPer):
#initialize a training example of max_num_time_steps,im_size,im_size
output = np.zeros((maxToAdd,1,size*size, 1))
#decide how many MNIST images to put in that tensor
            numToAdd = int(np.ceil(np.random.rand()*maxToAdd))
#sample that many images
indices = np.random.choice(X_train_raw.shape[0],size=numToAdd)
example = np.reshape(X_train_raw[indices], [X_train_raw[indices].shape[0], 28*28, 1])
#sum up the outputs for new output
exampleY = y_train_temp[indices]
output[0:numToAdd,0,:,:] = example
X_train[i,:,:,:,:] = output
y_train.append(np.sum(exampleY))
y_train = np.array(y_train)
if ep == 0:
print("X_train shape: ",X_train.shape)
print("y_train shape: ",y_train.shape)
for i in range(60000):
loss = model.train_on_batch(X_train[i:i+10], y_train[i:i+10])
print("loss %f" % loss)
#Test the model
    X_test = np.zeros((examplesPer,maxToAdd,1,size*size,1))
    for i in range(0,examplesPer):
        output = np.zeros((maxToAdd,1,size*size,1))
        numToAdd = int(np.ceil(np.random.rand()*maxToAdd))
        indices = np.random.choice(X_test_raw.shape[0],size=numToAdd)
        #reshape the sampled test images the same way as the training data above
        example = np.reshape(X_test_raw[indices], [X_test_raw[indices].shape[0], 28*28, 1])
        exampleY = y_test_temp[indices]
        output[0:numToAdd,0,:,:] = example
        X_test[i,:,:,:,:] = output
        y_test.append(np.sum(exampleY))
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    preds = model.predict(X_test)
    #print the results of the test
    print(np.sum(np.sqrt(np.mean([ (y_test[i] - preds[i][0])**2 for i in range(0,len(preds)) ]))))
    print("naive guess", np.sum(np.sqrt(np.mean([ (y_test[i] - np.mean(y_test))**2 for i in range(0,len(y_test)) ]))))
if __name__ == '__main__':
main()
```
#### File: acoustic_language/nn/helpers.py
```python
import keras.metrics
import keras.backend as K
from data.maps_constants import SEMITONES_ON_PIANO
def nhot_acc(y_true, y_pred):
# n_activated = K.sum(y_true, axis=1)
return keras.metrics.top_k_categorical_accuracy(y_true=y_true, y_pred=y_pred)
def mynhot_acc(y_true, y_pred, threshold=0.1):
zero_or_one = (K.sign(y_pred - threshold) / 2 + 0.5)
return 1 - K.sum(K.abs(y_true - zero_or_one)) / SEMITONES_ON_PIANO
```
#### File: acoustic_language/nn/language_model.py
```python
from keras.layers import Dense, Conv1D, LSTM, Conv2D, GRU
from keras.layers import Activation, AveragePooling2D, Dropout, Flatten, Reshape, Layer
from keras.layers.wrappers import TimeDistributed, Bidirectional
from keras.models import Sequential
from keras.optimizers import RMSprop, SGD
from nn.helpers import nhot_acc, mynhot_acc
from data.maps_constants import *
import numpy as np
class Model:
def __init__(self, timestamps):
# input_shape = (batch_size, timestamps, 1, input_freq_width, 1)
self.input_freq_width = 88*3
self.timestamps = timestamps
model = Sequential()
model.add(TimeDistributed(Conv2D(nb_filter=16,
nb_row=13*3+1,
nb_col=1,
border_mode='valid'),
input_shape=(self.timestamps, 1, self.input_freq_width, 1)))
model.add((Activation("relu")))
model.add(TimeDistributed(AveragePooling2D(pool_size=(3, 1))))
model.add(TimeDistributed(Conv2D(nb_filter=10,
nb_row=16,
nb_col=1,
border_mode='valid')))
model.add(Activation("relu"))
model.add(TimeDistributed(AveragePooling2D(pool_size=(5, 1))))
model.add(TimeDistributed(Flatten()))
model.add(TimeDistributed(Dense(output_dim=256)))
model.add(Activation("relu"))
# model.add(Bidirectional(LSTM(output_dim=88, return_sequences=False)))
#
# model.add(Reshape(target_shape=(self.timestamps, self.input_freq_width),
# input_shape=(self.timestamps, 1, self.input_freq_width, 1)))
model.add(LSTM(output_dim=88, return_sequences=False))
# model.add(Dropout(0.2))
model.add(Dense(output_dim=SEMITONES_ON_PIANO))
model.add(Activation("softmax"))
# model.add(Reshape((SEMITONES_ON_PIANO * timestamps,)))
model.summary()
opt = RMSprop(lr=1e-5)
# opt = SGD(lr=0.00001, momentum=0.9, decay=0.0005)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=[nhot_acc, mynhot_acc])
self.model = model
def get_model(self):
return self.model
def train(self, x_batch, y_batch):
return self.model.train_on_batch(
x_batch,
y_batch)
def make_input(self, x, y, batch_size, timestamps):
assert(x.shape[0] == y.shape[0])
big_batch_size = batch_size * timestamps
data_count = x.shape[1] // big_batch_size
for i in range(data_count):
x_batch = x[i*big_batch_size:(i+1)*big_batch_size, :]
y_batch = y[i*big_batch_size:(i+1)*big_batch_size, :]
y_batch_seq = np.reshape(y[i*big_batch_size:(i+1)*big_batch_size:timestamps, :],
[batch_size, SEMITONES_ON_PIANO])
x_batch = np.reshape(x_batch, [batch_size, timestamps, 1, self.input_freq_width, 1])
y_batch = np.reshape(y_batch, [batch_size, timestamps * SEMITONES_ON_PIANO])
# yield (x_batch, y_batch)
yield (x_batch, y_batch_seq)
def save_to_file(self, file_path):
pass
def load_from_file(self, file_path):
pass
``` |
{
"source": "a1exwang/dafx_labs",
"score": 2
} |
#### File: a1exwang/dafx_labs/synth.py
```python
import IPython.display as ipd
from datetime import datetime
import numpy as np
import scipy.signal
import scipy.io.wavfile  # load_sample() below uses scipy.io.wavfile.read
import math
import sys
from toposort import toposort, toposort_flatten
import matplotlib.pyplot as plt
import librosa.display
import sounddevice as sd
# just for debug purpose
np.set_printoptions(threshold=sys.maxsize)
np.seterr(all='raise')
#### helpers
# pan in (-60, 60)
# Based on DAFX chapter "SPATIAL EFFECTS", p144
# Assume loudspeaker is place in front of the listener, 60 fov.
def panning(x, pan):
theta0 = math.pi / 6
if len(x.shape) == 1:
# mono -> stereo
l, r = x, x
else:
l, r = x[0], x[1]
p = pan / 180 * math.pi
a = (math.cos(p)+math.sin(p)*math.tan(theta0)) / (math.cos(p)-math.sin(p)*math.tan(theta0))
l_out = l * math.sqrt(1 / (1 + a*a))
r_out = r * math.sqrt(1 / (1 + a*a)) * a
return np.array([l_out, r_out])
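def _example_panning_usage():
    # Illustrative sketch added for clarity; not part of the original synth module.
    # Panning a mono buffer 30 degrees to the right yields a (2, n) stereo array
    # whose right channel carries more energy than the left.
    mono = np.ones(8)
    stereo = panning(mono, pan=30)
    return stereo.shape  # (2, 8)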
def periodize(f, T):
assert(type(T) == int)
def _f(t):
return f(t % T)
return _f
def load_sample(file_path, sr=None):
rate, x = scipy.io.wavfile.read(file_path)
x = x.T
if x.dtype == np.int16:
x = x.astype(float) / 2**15
x = x
print('load sample ' + file_path + ' ' + str(sr))
if not sr:
return rate, x
else:
n2 = int(x.shape[-1] * sr / rate)
y = np.zeros((2, n2))
y[0, :] = scipy.signal.resample(x[0, :], n2)
y[1, :] = scipy.signal.resample(x[1, :], n2)
return sr, y
#### Generators
def sine(A, pan):
def _sine(sr, f, t):
return panning(A * np.exp(-1j * 2 * np.pi * f * t), pan)
return _sine
# General Saw wave
# width = 1 is rising sawtooth
# width = 0.5 is triangle
# width = 0 is falling sawtooth
# pan is in [-1, 1], -1 for left, 1 for right
def saw(A, width=1, pan=0):
def _saw(sr, f, t):
real = scipy.signal.sawtooth(2 * np.pi * f * t, width=width)
im = scipy.signal.sawtooth(2 * np.pi * f * t + np.pi / 2, width=width)
y = A * (real + 1j * im)
return panning(y, pan)
return _saw
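def _example_saw_generators():
    # Illustrative sketch (added, not part of the original synth): width selects the
    # waveform as described above -- 0.5 gives a triangle, 1 a rising sawtooth.
    # Each call returns a generator callable with the (sr, f, t) signature used by mix().
    triangle = saw(A=0.5, width=0.5, pan=0)
    rising = saw(A=0.5, width=1, pan=0)
    return triangle, rising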
def noise(A, pan):
def _f(sr, f, t):
a = math.ceil(sr / f)
n = t.shape[-1]
y = np.random.random(n + a) * A
        # use two length-n windows offset by a samples so the real and imaginary parts match
        return panning(y[:n] + 1j * y[a:], pan)
return _f
def sampler(A, file_path):
def _f(sr, f, t):
rate, x = load_sample(file_path, sr)
x *= A
assert(rate==sr)
n = t.shape[-1]
if n > x.shape[-1]:
            return np.append(x, np.zeros((2, n - x.shape[-1])), axis=1)
else:
return x[:, :n]
return _f
#### Filters
def pass_thru():
return (lambda sr, x: x)
# Simple delay line.
# y[n] = x[n] + decay * y[n-d]
# d is in seconds
def delay(d, decay):
def _delay(sr, x):
y = np.full_like(x, 0)
delay_count = max(int(d * sr), 0)
for i in range(x.shape[1]):
if i - delay_count < 0:
delay_y = 0
else:
delay_y = y[:, i-delay_count]
y[:, i] = x[:, i] + decay * delay_y
return y
return _delay
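def _example_delay_presets():
    # Illustrative sketch (added): a ~15 ms delay gives the classic slapback and a
    # ~50 ms delay a distinct echo, matching the commented presets further below.
    slapback = delay(d=0.015, decay=0.5)
    echo = delay(d=0.05, decay=0.5)
    return slapback, echo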
# Variable-delay-value delay line.
# @delay_func: delay_func(i) gives the delay value at sample point `i`
# This can help implementing Vibrato, Echo, Flanger, Chorus
# DAFX 2.6.2 Flanger, chorus, slapback, echo
def vdelay(delay_func, decay_func):
def _f(sr, x):
y = np.full_like(x, 0)
for i in range(x.shape[-1]):
delay_count = max(int(delay_func(i)*sr), 0)
decay = decay_func(i)
if i-delay_count >= 0:
y[:, i] = x[:, i] + decay * y[:, i-delay_count]
else:
y[:, i] = 0
return y
return _f
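def _example_modulated_delays(sr=44100):
    # Illustrative sketch (added): sweeping the delay time with a slow LFO turns the
    # delay line into a flanger, and a tiny fast sweep into a vibrato, mirroring the
    # commented 'flanger' and 'vibrato' presets further below.
    flanger = vdelay(lambda i: 0.010 + 0.005 * math.sin(2 * math.pi * 0.5 * i / sr),
                     lambda i: 0.8)
    vibrato = vdelay(lambda i: 0.0075 + 0.0025 * math.sin(2 * math.pi * 5 * i / sr),
                     lambda i: 0.8)
    return flanger, vibrato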
# IIR Filter
# @btype: one of ['lowpass', 'highpass', 'bandpass', 'bandstop']
# @Wn:
# @bw: bandwidth, unit in sr/2 = 1
def iirfilter(btype, wpass, wstop, gpass=3, gstop=35):
N, Wn = scipy.signal.buttord(wpass, wstop, gpass, gstop, analog=False)
def _f(sr, x):
b, a = scipy.signal.butter(N, Wn, btype, analog=False)
ret = scipy.signal.filtfilt(b, a, x).astype('complex128')
return ret
return _f
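def _example_lowpass_filter(sr=44100):
    # Illustrative sketch (added): a Butterworth low-pass with a 1 kHz passband edge
    # and a 1.5 kHz stopband edge, as used by the 'iir' channel further below.
    return iirfilter('lowpass', 1000 / (sr / 2), 1500 / (sr / 2))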
## Modulators
def ring_modulator(f_c, carrier_func=np.sin, phi0=0):
def _f(sr, x):
n = x.shape[-1]
return carrier_func(2*np.pi * f_c/sr * np.arange(n) + phi0) * x.real + \
1j * carrier_func(2*np.pi * f_c/sr * np.arange(n) + phi0 + np.pi/2) * x.imag
return _f
def amplitude_modulator(f_c, alpha, carrier_func=np.sin, phi0=0):
def _f(sr, x):
n = x.shape[-1]
return (1 + alpha * carrier_func(2*np.pi * f_c/sr * np.arange(n) + phi0)) * x.real + \
(1 + alpha * carrier_func(2*np.pi * f_c/sr * np.arange(n) + phi0 + np.pi/2)) * x.imag
return _f
def phase_modulator(f_c, A=1, k=1):
f = lambda sr, n, x: A * np.cos(2*np.pi* f_c/sr * np.arange(n) + k * x.real)
def _f(sr, x):
n = x.shape[-1]
return f(sr, n, x.real) + 1j * f(sr, n, x.imag)
return _f
def frequency_modulator(f_c, A=1, k=1):
def _f(sr, x):
n = x.shape[-1]
sum_x = np.full_like(x, 0)
for i in range(n):
sum_x[:, i] = np.sum(x[:, i])
f = lambda data: A * np.cos(2*np.pi* f_c/sr * np.arange(n) + 2*np.pi * k * data)
return f(sum_x.real) + 1j*f(sum_x.imag)
return _f
def ssb_modulator(f_c, carrier_func=np.cos):
def _f(sr, x):
n = x.shape[-1]
return carrier_func(2*np.pi * f_c/sr * np.arange(n)) * x.real - \
np.sign(f_c) * carrier_func(2*np.pi * f_c/sr * np.arange(n) + np.pi/2) * x.imag
return _f
#### Dynamic Range Control
# The implementation is from DAFX: p110.
# But for detailed explanation,
# please refer to Digital Audio Signal Processing, Chapter 7 Dynamic Range Control
def limiter(threshold_db, attack_time, release_time, delay_time, plot=False):
def _f(sr, x):
threshold = 10 ** (threshold_db/10)
at = 1 - math.exp(-2.2/(attack_time*sr))
rt = 1 - math.exp(-2.2/(release_time*sr))
n = x.shape[-1]
delay_n = round(delay_time*sr)
def calculate(x_in):
gain = np.array([1, 1])
y = np.full_like(x_in, 0)
abs_xn = np.abs(x_in)
gains = np.full_like(x_in, 0)
xpeak = peak_level_measurement(sr, x_in, attack_time, release_time)
for i in range(n):
# Do not replace this with min(1, threshold/xpeak) for DivisionByZero error
f = np.full_like(gain, 0)
for j in range(len(xpeak)):
f[j] = threshold/xpeak[j] if xpeak[j] > threshold else 1
k = np.where(f < gain, at, rt)
gain = (1-k)*gain + k*f
gains[:, i] = gain
y[:, i] = gain * x_in[:, i-delay_n] if i-delay_n >= 0 else 0
return y, gains
y_real, gain_real = calculate(x.real)
y_imag, _ = calculate(x.imag)
if plot:
plt.plot(np.arange(x.shape[-1])/sr, 10*np.log10(gain_real[0, :]))
return y_real + 1j*y_imag
return _f
# The implementation is from DAFX: p112.
# But for detailed explanation,
# please refer to Digital Audio Signal Processing, Chapter 7 Dynamic Range Control
def compressor(compressor_threshold_db,
compressor_scale,
expander_threshold_db,
expander_scale,
attack_time,
release_time,
delay_time,
average_time,
plot=False):
def _f(sr, x):
at = 1 - math.exp(-2.2/(attack_time*sr))
rt = 1 - math.exp(-2.2/(release_time*sr))
tav = 1 - math.exp(-2.2/(average_time*sr))
n = x.shape[-1]
delay_n = round(delay_time*sr)
def calculate(x_in):
xrms = np.array([0, 0])
gain = np.array([1, 1])
y = np.full_like(x_in, 0)
gains = np.full_like(x_in, 0)
for i in range(n):
xrms = (1-tav)*xrms + tav*x_in[:, i]*x_in[:, i]
gdb = np.full_like(xrms, 0)
for j in range(len(xrms)):
if xrms[j] == 0:
gdb[j] = 0
else:
xdb = 10 * np.log10(xrms[j])
#print('xdb', xdb)
gdb[j] = min(
0,
compressor_scale*(compressor_threshold_db-xdb),
expander_scale*(expander_threshold_db-xdb))
f = 10**(gdb/20)
k = np.where(f < gain, at, rt)
gain = (1-k)*gain + k*f
gains[:, i] = gain
y[:, i] = gain * x_in[:, i-delay_n] if i-delay_n >= 0 else 0
return y, gains
y_real, gain_real = calculate(x.real)
y_imag, _ = calculate(x.imag)
if plot:
plt.plot(np.arange(x.shape[-1])/sr, 10*np.log10(gain_real[0, :]))
return y_real + 1j*y_imag
return _f
#### Time and Frequency Warping
# Straight-forward time warping without interpolation
def time_warping(theta):
def _f(sr, x):
n = x.shape[-1]
y = np.full_like(x, 0)
for t in range(n):
            m = np.clip(int(theta(t)), 0, n - 1)
y[:, t] = x[:, m]
return y
return _f
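def _example_exponential_time_warp(sr=44100):
    # Illustrative sketch (added): the same exponential warp map used by the
    # 'time-warping' channel below, repeated every second via periodize().
    return time_warping(periodize(lambda x: np.exp(x / sr * 10) / np.exp(10) * sr, 1 * sr))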
#### Spatial Effects
# Convolution Reverb
def convolver(h):
def _f(sr, x):
y = np.full_like(x, 0)
n = x.shape[-1]
for i in range(x.shape[0]):
# result length >= n
y[i, :] = np.convolve(x[i, :], h[i, :], mode='same')
return y
return _f
#### A simple player and mixer
def mix(sr, freq, time_points, generators, filters, connections, output_channels=('0',), profile=True):
deps = {}
for f, t in connections:
if t in deps:
deps[t].add(f)
else:
deps[t] = set([f])
channel_outs = {}
sort_result = toposort(deps)
profile_generator = {}
processed_channels = set()
all_channels = set([x for x in generators] + [x for x in filters])
    def process_own_channel(channel, channel_in=None):
        # avoid a shared mutable default argument: allocate a fresh buffer per call
        if channel_in is None:
            channel_in = np.zeros([2, len(time_points)], dtype='complex128')
        channel_out = channel_in
if channel in generators:
for i, gen in enumerate(generators[channel]):
t1 = datetime.now()
channel_out += gen(sr, freq, time_points)
t2 = datetime.now()
if profile:
print('channel "%s", id=%d, generator "%s", time=%s' % (channel, i, gen, t2-t1))
# If not filters, assume passing through
if channel in filters:
for i, filt in enumerate(filters[channel]):
t1 = datetime.now()
channel_out = filt(sr, channel_out)
t2 = datetime.now()
if profile:
print('channel "%s", id=%d, filter "%s", time=%s' % (channel, i, filt, t2-t1))
return channel_out
for channels in sort_result:
for channel in channels:
channel_in = np.zeros([2, len(time_points)], dtype='complex128')
if channel in deps:
for dep_channel in deps[channel]:
channel_in += channel_outs[dep_channel]
channel_outs[channel] = process_own_channel(channel, channel_in)
processed_channels.add(channel)
for channel in all_channels - processed_channels:
channel_outs[channel] = process_own_channel(channel)
ret = []
for c in output_channels:
ret.append(channel_outs[c])
return ret
def plot_dft(sr, y, title='', ylim=None):
z = np.fft.fft(y)
mag = np.abs(np.real(z)) / (len(y)/2)
db = np.log10(np.where(mag > 1e-10, mag, 1e-10)) * 10
#phi = np.angle(z) / np.pi * 180
fs = np.fft.fftfreq(y.shape[-1]) * sr
valid_n = len(fs) // 2
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
p = ax.plot(fs[:valid_n], db[:valid_n])
plt.xlabel('f(Hz)')
plt.ylabel('mag(dB)')
if ylim:
plt.ylim(*ylim)
plt.xlim(20, 20000)
plt.title(title)
ax.set_xscale('log')
def plot_filter_transfer_function(sr, f, stereo=True):
x = np.zeros([2, sr])
x[:, 0] = sr / 2
y = f(sr, x)
plot_dft(sr, y[0], title='Transfer Function(Magnitude), L')
plot_dft(sr, y[1], title='Transfer Function(Magnitude), R')
def easy_visualize(sr, y):
first_n = 1024
# wave left
plt.figure()
plt.plot(np.arange(min(first_n, np.shape(y)[1])) / sr, y[0, :first_n])
# wave right
#plt.figure()
#plt.plot(np.arange(min(first_n, np.shape(y)[1])) / sr, y[1, :first_n])
# dft
Yl, Yr = librosa.stft(y[0]), librosa.stft(y[1])
Ydb_l, Ydb_r = librosa.amplitude_to_db(abs(Yl)), librosa.amplitude_to_db(abs(Yr))
plt.figure()
librosa.display.specshow(Ydb_l, sr=sr, x_axis='time', y_axis='log')
plot_dft(sr, y[0], ylim=(-50, 3))
#plot_dft(sr, y[1], ylim=(-50, 3))
plt.show()
sr = 44100
T = 2
t = np.linspace(0, T, int(T*sr))
f = 220
print('load reverb')
_, reverb1_h = load_sample('reverb1.wav', sr)
print('load reverb done')
generators = {
'saw': [
saw(0.5, 0.5, pan=30),
#noise(0.5, pan=0),
],
'sine': [
sine(A=0.5, pan=-30),
],
'drums': [
sampler(A=0.5, file_path='drums.wav'),
],
'piano': [
sampler(A=0.5, file_path='piano.wav'),
]
}
filters = {
# 'vdelay': [
# delay(0.1, 0.5),
# vdelay(
# lambda i: 0.3*(math.sin(2*math.pi*0.5*i/sr)+1)/2, lambda i: 0.5),
# ],
# 'slapback': [
# delay(d=0.015, decay=0.5),
# ],
# 'echo': [
# delay(d=0.05, decay=0.5),
# ],
# 'vibrato': [
# vdelay(
# lambda i: 0.0075 + 0.0025*math.sin(2*math.pi*5*i/sr), lambda i: 0.8),
# ],
# 'flanger': [
# vdelay(
# lambda i: 0.010 + 0.005*math.sin(2*math.pi*0.5*i/sr), lambda i: 0.8),
# ],
'2': [
delay(0.8, 0.5),
],
'iir': [
iirfilter('lowpass', 1000/(sr/2), 1500/(sr/2)),
],
# 'rm': [
# ring_modulator(f_c=50, carrier_func=np.sin),
# ],
# 'am': [
# amplitude_modulator(f_c=2, alpha=0.5, carrier_func=np.sin),
# ],
# 'pm': [
# phase_modulator(f_c=2),
# ],
# 'fm': [
# frequency_modulator(f_c=2, k=1),
# ],
# 'ssb': [
# ssb_modulator(f_c=-2)
# ],
# 'compressor': [
# compressor(compressor_threshold_db=-40,
# compressor_scale=0.9,
# expander_threshold_db=0,
# expander_scale=1,
# attack_time=0.01,
# release_time=0.01,
# delay_time=0.001,
# average_time=0.05,
# plot=True)
# ],
'time-warping': [
time_warping(periodize(lambda x: np.exp(x/sr * 10)/np.exp(10) * sr, 1 * sr))
],
# 'reverb': [
# convolver(reverb1_h),
# ]
}
connections = [
# ('saw', 'iir'),
# ('saw', 'vdelay'),
# ('vdelay', 'master'),
# ('iir', 'master'),
# ('saw', 'rm'),
# ('saw', 'am'),
# ('saw', 'pm'),
# ('saw', 'fm'),
# ('saw', 'ssb'),
# ('piano', 'compressor'),
('piano', 'time-warping'),
]
y_complex, = mix(sr, f, t, generators, filters, connections, output_channels=('time-warping',))
y = y_complex.real
# # scipy wants y to be (nsamples, nchannels)
# #scipy.io.wavfile.write('audio.wav', sr, y.T.astype('float32'))
# # Or play it directly
# #sd.default.samplerate = sr
# #sd.play(qy.T, blocking=True)
# # Also, you can visualize it
# easy_visualize(sr, y)
# #plot_filter_transfer_function(sr, delay(1/100, 0.5), stereo=False)
# # When in ipython play sound in this way
# ipd.Audio(y, rate=sr)
print(123)
``` |
{
"source": "a1exwang/fm-synth",
"score": 3
} |
#### File: fm-synth/gui/helpers.py
```python
import pyqtgraph as pg
import math
class LogValueAxis(pg.AxisItem):
def __init__(self, *args, **kwargs):
pg.AxisItem.__init__(self, *args, **kwargs)
def tickStrings(self, values, scale, spacing):
strings = []
for v in values:
# vs is the original tick value
vs = v * scale
vstr = '%0.0f' % (math.exp(vs),)
strings.append(vstr)
return strings
```
#### File: fm-synth/gui/monitors.py
```python
import pyqtgraph as pg
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget
from PyQt5 import QtGui
from PyQt5.QtCore import QObject, Qt
from PyQt5.QtWidgets import QHBoxLayout
from pprint import pprint
from PyQt5.QtWebEngineWidgets import QWebEngineView as QWebView
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
from PyQt5.QtCore import QUrl
import os
from api.main import start_web_server, set_data
import PyQt5.Qt
from gui.slider import ConnectSlider
from channels.channel import Channel
class FMSynthGUI(QObject):
update_graph_signal = pyqtSignal('PyQt_PyObject', 'PyQt_PyObject', 'PyQt_PyObject',
'PyQt_PyObject', 'PyQt_PyObject', name='graph_needs_updating')
def __init__(self):
super().__init__()
self.app = QtGui.QApplication([])
self.graphics_window = pg.GraphicsWindow()
self.graphics_window.resize(800, 450)
self.graphics_window.setWindowTitle('FM8 Synthesizer Main Panel')
self.update_graph_signal.connect(self.update_graph)
self.plot_count = 0
self.slider_panel = QWidget(flags=Qt.Widget)
self.slider_panel.resize(300, 200)
self.sp_layout = QHBoxLayout()
self.slider_panel.setLayout(self.sp_layout)
self.sliders = []
self.slider_panel.show()
self.web_view = QWebView()
def post_init(self, out):
for i in range(5):
s = ConnectSlider(name='Slider %g' % i)
self.sliders.append(s)
self.sp_layout.addWidget(s)
ops = []
out.dump(ops)
pprint(ops)
start_web_server()
objs = []
out.dump(objs)
set_data(objs)
path = os.path.abspath(os.path.dirname(__file__)) + '/../web/main.html'
self.web_view.load(QUrl.fromLocalFile(path))
# self.web_view.page().settings().setAttribute(QWebEngineSettings)
self.web_view.show()
@pyqtSlot('PyQt_PyObject', 'PyQt_PyObject', 'PyQt_PyObject', 'PyQt_PyObject', 'PyQt_PyObject', name='update_graph')
def update_graph(self, curve, args, kwargs, pl, resize):
curve.setData(*args, **kwargs)
if resize:
pl.enableAutoRange('x', False)
@staticmethod
def start():
QtGui.QApplication.instance().exec_()
def add_plot(self, title, *args, **kargs):
if self.plot_count % 3 == 0:
self.graphics_window.nextRow()
self.plot_count += 1
return self.graphics_window.addPlot(title=title, *args, **kargs)
```
#### File: fm-synth/gui/slider.py
```python
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QSlider, QLabel, QComboBox
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSlot, pyqtSignal
from channels.channel import Channel
class DoubleSlider(QSlider):
# create our our signal that we can connect to if necessary
doubleValueChanged = pyqtSignal(float)
def __init__(self, *args, **kwargs):
super(DoubleSlider, self).__init__(*args, **kwargs)
self._min_value = 0
self._max_value = 1
self._step = 0.01
self.valueChanged.connect(self.emitDoubleValueChanged)
def _step_count(self):
return int((self._max_value - self._min_value) / self._step)
def emitDoubleValueChanged(self):
        value = self._min_value + float(super(DoubleSlider, self).value()) * self._step
self.doubleValueChanged.emit(value)
def setRange(self, range_min, range_max):
assert range_min < range_max
self._min_value = range_min
self._max_value = range_max
super(DoubleSlider, self).setRange(0, self._step_count())
def singleStep(self):
return self._step
def setSingleStep(self, value):
self._step = value
assert value > 0
assert self._step_count() > 0
super(DoubleSlider, self).setRange(0, self._step_count())
return super(DoubleSlider, self).setSingleStep(1)
def value(self):
int_value = super(DoubleSlider, self).value()
        return self._min_value + float(int_value) * self._step
def setValue(self, value):
super(DoubleSlider, self).setValue(int((value - self._min_value) / self._step))
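def _example_double_slider_setup(slider):
    # Illustrative sketch (added): mirrors how ConnectSlider configures the slider in
    # channel_selected() below -- set the range and step before the value so the
    # integer <-> float mapping stays consistent.
    slider.setRange(0.0, 2.0)
    slider.setSingleStep(0.05)
    slider.setValue(1.0)
    slider.doubleValueChanged.connect(print)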
class ConnectSlider(QWidget):
def __init__(self, name):
super().__init__(flags=Qt.Widget)
self.name = name
self.channel = Channel.get_instance()
self.connected_channel = None
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.slider = DoubleSlider()
self.channel_selector = QComboBox()
self.label_value = QLabel(text='0')
self.label_name = QLabel(text=self.name)
self.channel_selector.setEditable(True)
self.channel_selector.addItems(['Unconnected'] + self.channel.get_channels())
self.channel_selector.currentIndexChanged.connect(self.channel_selected)
self.channel_selector.setInsertPolicy(QComboBox.NoInsert)
self.connected_channel = None
self.slider.doubleValueChanged.connect(self.slider_changed)
self.layout.addWidget(self.label_name)
self.layout.addWidget(self.channel_selector)
self.layout.addWidget(self.slider)
self.layout.addWidget(self.label_value)
self.layout.setAlignment(Qt.AlignRight)
@pyqtSlot(float, name='slider_changed')
def slider_changed(self, val):
self.label_value.setText(str(val))
if self.connected_channel:
self.connected_channel(val)
@pyqtSlot(int, name='channel_selected')
def channel_selected(self, index):
if index > 0:
name = self.channel.get_channels()[index - 1]
# NOTE(aocheng):
# The slot must set before the range and step is set, or the old channel will have a wrong value.
slot = self.channel.get_channel(name)
self.connected_channel = slot
get_val = self.channel.get_channel_val(name)
range_min, range_max, step = self.channel.get_channel_range_and_step(name)()
self.slider.setRange(range_min, range_max)
self.slider.setSingleStep(step)
self.slider.setValue(get_val())
```
#### File: fm-synth/operators/device_output.py
```python
from PyQt5.QtCore import pyqtSlot
from channels.channel import Channel
from operators.base import OutputOperator
import numpy as np
class DeviceOutput(OutputOperator):
def __init__(self, input_ops, volume=1.0, name=None):
super().__init__(input_ops, name)
self.total_count = 0
self.stream = None
self.volume = volume
self.channel = Channel.get_instance()
self.channel.add_channel(name='MasterVol', slot=self.volume_changed, get_val=lambda: self.volume)
@pyqtSlot(float, name='volume_changed')
def volume_changed(self, vol):
if vol <= 0:
vol = 0
if vol >= 1:
vol = 1
self.volume = vol
def next_buffer(self, input_buffers, n):
if len(input_buffers) == 1:
# mono
# [-1, 1) -> [0, 2**16)
arr = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2**16
arr = np.transpose(np.array([arr, arr]))
else:
# stereo
arr_l = ((np.array(input_buffers[0], dtype='float32') + 1) / 2) * 2 ** 16
arr_r = ((np.array(input_buffers[1], dtype='float32') + 1) / 2) * 2 ** 16
arr = np.transpose(np.array([arr_l, arr_r]))
result = np.array(arr, dtype='int16')
return [result * self.volume]
```
#### File: fm-synth/operators/reduce.py
```python
from operators.base import Operator
import numpy as np
import enum
class ReduceOperator(Operator):
"""
Input n
Output 1
Input 0 * Input 1 * Input 2 ...
"""
class ReduceOperations(enum.Enum):
# name, initial value, reduce function
SUM = ('sum', 0, lambda a, b: a + b)
MUL = ('mul', 1, lambda a, b: a * b)
def __init__(self, input_ops, operation, name=None):
if name is None:
name = "%s#%d" % (operation.value[0], Operator.alloc_id())
super().__init__(input_ops,
1,
input_ops[0][0].sr,
input_ops[0][0].buffer_size,
name)
self.operation = operation
def next_buffer(self, input_buffers, n):
result = np.ones([self.buffer_size]) * self.operation.value[1]
for input_buffer in input_buffers:
result = self.operation.value[2](result, input_buffer)
return [result]
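def _example_reduce_semantics():
    # Illustrative sketch (added): each enum member bundles (name, initial value,
    # reduce function), and next_buffer() simply folds that function over the inputs.
    sum_fn = ReduceOperator.ReduceOperations.SUM.value[2]
    mul_fn = ReduceOperator.ReduceOperations.MUL.value[2]
    return sum_fn(1.0, 2.0), mul_fn(2.0, 3.0)  # (3.0, 6.0)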
```
#### File: a1exwang/fm-synth/play.py
```python
import pyaudio
import math
import scipy.io.wavfile
import numpy as np
class Player:
def __init__(self, sr, buffer_size, input_op, all_output_operators):
self.input_op = input_op
self.all_output_operators = all_output_operators
self.current_offset = 0
self.stream = None
self.buffer_size = buffer_size
self.sr = sr
def callback(self, in_data, frame_count, time_info, flag):
if flag:
print("Playback Error: %i" % flag)
assert(frame_count == self.buffer_size)
self.current_offset += 1
for op in self.all_output_operators:
op.step(self.current_offset + 1)
result = self.input_op[0].output_buffers[self.input_op[1]]
return result.tobytes(), pyaudio.paContinue
def play_non_blocking(self):
pa = pyaudio.PyAudio()
self.stream = pa.open(format=pyaudio.paInt16,
channels=2,
rate=self.sr,
output=True,
frames_per_buffer=self.buffer_size,
stream_callback=self.callback)
# while stream.is_active():
# time.sleep(0.1)
#
# stream.close()
# pa.terminate()
def play(self):
pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16,
channels=2,
rate=self.sr,
output=True)
data, state = self.callback(None, self.buffer_size, 0, None)
while state == pyaudio.paContinue:
stream.write(data)
data, state = self.callback(None, self.buffer_size, 0, None)
stream.close()
pa.terminate()
def save(self, output_file, length):
buffer_count = int(math.ceil(length * self.sr / self.buffer_size))
result = np.array([], dtype='float')
for i in range(buffer_count):
self.input_op[0].step(i + 1)
result = np.array([*result, *self.input_op[0].output_buffers[self.input_op[1]]])
scipy.io.wavfile.write(output_file, self.sr, result)
``` |
{
"source": "a1exwang/na_algorithms",
"score": 3
} |
#### File: a1exwang/na_algorithms/main.py
```python
from libs import my_sum, newton, iteration_solver, hilbert, power, fitting, cho_solve
import numpy as np
def do_sum():
print('1.2 Sum')
my_sum(
lambda n, sigma: print(n, sigma, 1 / n) if n % 100000 == 0 else None,
10000000000,
np.float)
print('------------------------')
def do_choleskey():
print('2.2 Cholesky')
n = 10
A = hilbert(n)
x = np.ones(n, dtype='double')
b = np.dot(A, x)
x1 = cho_solve(A, b)
r = b - np.dot(A, x1)
delta_x = x - x1
result1 = np.max(r)
result2 = np.max(delta_x)
print('r_max %g, delta_x_max %g' % (result1, result2))
b = np.dot(A, x) + 1e-7
x1 = cho_solve(A, b)
r = b - np.dot(A, x1)
delta_x = x - x1
result1 = np.max(r)
result2 = np.max(delta_x)
print('10^-7, r_max %g, delta_x_max %g' % (result1, result2))
n = 8
A = hilbert(n)
x = np.ones(n, dtype='double')
b = np.dot(A, x)
x1 = cho_solve(A, b)
r = b - np.dot(A, x1)
delta_x = x - x1
result1 = np.max(r)
result2 = np.max(delta_x)
print('n=8, r_max %g, delta_x_max %g' % (result1, result2))
n = 12
A = hilbert(n)
x = np.ones(n, dtype='double')
b = np.dot(A, x)
x1 = cho_solve(A, b)
r = b - np.dot(A, x1)
delta_x = x - x1
result1 = np.max(r)
result2 = np.max(delta_x)
print('n=12, r_max %g, delta_x_max %g' % (result1, result2))
print('--------------------------')
def do_newton():
print("2.2 Newton")
print("x**3-x-1")
newton(
lambda x: x**3 - x - 1,
lambda x: 3*x**2 - 1,
0.6,
lambda l0, i: l0 * 0.5**i,
1,
1e-5,
lambda k, la, y, y1, x, x1: print("(k, lambda, x, x1, delta_x) = (%d, %0.7f, %0.7f, %0.7f, %0.7f)" %
(k, la, x, x1, abs(x1 - x))))
print("-x**3 + 5*x")
newton(
lambda x: -x**3 + 5*x,
lambda x: -3*x**2 + 5,
1.2,
lambda l0, i: l0 * 0.5**i,
1,
1e-5,
lambda k, la, y, y1, x, x1: print("(k, lambda, x, x1, delta_x) = (%d, %0.7f, %0.7f, %0.7f, %0.7f)" %
(k, la, x, x1, abs(x1 - x))))
print('--------------------------')
def do_iteration_solver():
print('Iteration solver')
n = 10
threshold = 1e-4
A = hilbert(n)
b = 1.0 / np.arange(1, n+1, dtype='double')
x0 = np.zeros(n)
iteration_solver(
A,
b,
x0,
threshold,
# lambda n, x, delta: print(n, np.linalg.norm(delta), x) if n % 1 == 0 else None,
lambda n, x, delta: None,
method='sor',
omega=1.25)
A = hilbert(n)
b = 1.0 / np.arange(1, n+1, dtype='double')
x0 = np.zeros(n)
iteration_solver(
A,
b,
x0,
threshold,
lambda a, b, c: None,
# lambda n, x, delta: print(n, np.linalg.norm(delta), x) if n % 1 == 0 else None,
method='jacobi',
omega=1.25)
print('--------------------------')
def do_power():
print("Power iteration, Ax = lambda x, what is lambda and x")
A = np.array([
[5, -4, 1],
[-4, 6, -4],
[1, -4, 7]
], dtype='double')
x0 = np.ones(3, dtype='double')
power(A, x0, 1e-5,
lambda i, x, lambda1, delta: print("(i, delta, lambda1, x) = (%d, %f, %f, %s)"
% (i, delta, lambda1, str(x))))
A = np.array([
[25, -41, 10, -6],
[-41, 68, -17, 10],
[10, -17, 5, -5],
[-6, 10, -3, 2]
], dtype='double')
x0 = np.ones(4, dtype='double')
power(A, x0, 1e-5,
lambda i, x, lambda1, delta: print("(i, delta, lambda1, x) = (%d, %f, %f, %s)"
% (i, delta, lambda1, str(x))))
print('--------------------------')
def do_fitting():
print('Fitting')
data = np.array([
[1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8],
[33.4, 79.5, 122.65, 159.05, 189.15, 214.15, 238.65, 252.2, 267.55, 280.5, 296.65, 301.65, 310.4, 318.15, 325.15]
], dtype='double')
n = list(np.shape(data))[1]
x = np.reshape(data[0, :], [n])
y = np.reshape(data[1, :], [n])
r = fitting(data, 3)
f1 = lambda x: r[0] + r[1] * x + r[2] * x**2
d = np.linalg.norm(f1(x) - y) / np.sqrt(n)
print("y = %f + %fx + %fx^2, \td = %f" %(r[0], r[1], r[2], d))
data1 = np.reshape([x, np.log(y)], [2, n])
r = fitting(data1, 2)
a, b = np.exp(r[0]), r[1]
f2 = lambda x: a * np.exp(b * x)
d = np.linalg.norm(f2(x) - y) / np.sqrt(n)
print("y = %f e^(%ft), \t\t\td = %f" % (a, b, d))
print('--------------------------')
if __name__ == '__main__':
pass
do_choleskey()
do_newton()
do_iteration_solver()
do_power()
do_fitting()
``` |
{
"source": "a1exwang/qtrading-algorithms",
"score": 3
} |
#### File: a1exwang/qtrading-algorithms/random_seq_engine.py
```python
import numpy as np
import math
import matplotlib.pyplot as plt
class MyOperator:
def __init__(self):
self.init_price = 0
self.expected_return_rate = 0.8
self.max_return_rate = 0.8
self.max_last_prices = 100
self.last_prices = []
self.sell_percent = 0.5
self.buy_percent = 0.2
self.min_trade_period = 10
self.last_trade_time = -self.min_trade_period
def sell(self, t, shares):
self.last_trade_time = t
return -shares * self.sell_percent
def buy_in_cash(self, t, cash, price):
self.last_trade_time = t
print(cash)
return math.floor(cash / price) * self.buy_percent
def __call__(self, t, price, shares, cash, service_charge_rate):
self.last_prices.append(price)
if len(self.last_prices) > self.max_last_prices:
self.last_prices = self.last_prices[1:]
if t - self.last_trade_time >= self.min_trade_period:
if shares > 100:
if price < sum(self.last_prices) / len(self.last_prices) * 0.95:
return self.sell(t, shares)
if cash > 100:
if price < sum(self.last_prices) / len(self.last_prices) * 1.3:
return self.buy_in_cash(t, cash, price)
return 0
def simulate(init_price, init_cash, deltas, operator):
current_price = init_price
current_shares = (init_cash / 2) / current_price
current_cash = init_cash / 2
total_assets = []
prices = []
total_trade_values = []
total_cash = []
service_charge_rate = 0.001
for t, d in enumerate(deltas):
# > 0, buy x shares
# < 0, sell x shares
traded_shares = operator(t, current_price, current_shares, current_cash, service_charge_rate)
current_shares += traded_shares
current_cash -= traded_shares * current_price
service_charge = abs(traded_shares) * current_price * service_charge_rate
current_cash -= service_charge
total_assets.append(current_cash + current_shares * current_price)
prices.append(current_price)
total_trade_values.append(traded_shares * current_price)
total_cash.append(current_cash)
current_price = current_price * (1+d)
return np.array(total_assets), np.array(prices), total_trade_values, np.array(total_cash)
def run(your_operator, name):
deltas = np.concatenate((
np.random.uniform(-0.09, 0.11, 100),
np.random.uniform(-0.11, 0.09, 100),
np.random.uniform(-0.09, 0.10, 100),
np.random.uniform(-0.10, 0.09, 100),
np.random.uniform(-0.10, 0.10, 100),
))
init_price = 10.0
principle = 10000
total_assets, total_prices, total_trade_values, total_cash = simulate(init_price, principle, deltas, MyOperator())
total_assets2, _, total_trade_values2, total_cash2 = simulate(init_price, principle, deltas, your_operator)
plt.subplot('211')
plt.plot(total_assets, label='Asset(%s)' % 'trend')
plt.plot(total_assets2, label='Asset(%s)' % name)
plt.plot(total_prices/init_price * principle, label='Price')
plt.legend()
plt.subplot('212')
plt.plot(total_trade_values, label='Traded(%s)' % 'Trend')
plt.plot(total_trade_values2, label='Traded2(%s)' % name)
plt.plot(total_cash, label='Cash')
plt.legend()
plt.show()
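def _example_buy_and_hold():
    # Illustrative sketch (added): any object exposing the same
    # __call__(t, price, shares, cash, service_charge_rate) protocol can be passed to
    # run(); this naive strategy spends all of its cash on the first tick and holds.
    class BuyAndHold:
        def __call__(self, t, price, shares, cash, service_charge_rate):
            if t == 0 and cash > price:
                return math.floor(cash / price)
            return 0
    run(BuyAndHold(), 'buy-and-hold')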
``` |
{
"source": "a1exwang/theano-cnn-intro",
"score": 3
} |
#### File: a1exwang/theano-cnn-intro/layers.py
```python
import theano.tensor as T
import numpy as np
from utils import sharedX
import theano.tensor.nnet
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.pool import pool_2d
class Layer(object):
def __init__(self, name, trainable=False):
self.name = name
self.trainable = trainable
def forward(self, inputs):
pass
def params(self):
pass
class Relu(Layer):
def __init__(self, name):
super(Relu, self).__init__(name)
def forward(self, inputs):
# Your codes here
return 0.5 * (T.abs_(inputs) + inputs)
class Sigmoid(Layer):
def __init__(self, name):
super(Sigmoid, self).__init__(name)
def forward(self, inputs):
# Your codes here
return 1 / (1 + T.exp(-inputs))
class Softmax(Layer):
def __init__(self, name):
super(Softmax, self).__init__(name)
def forward(self, inputs):
# Your codes here
return theano.tensor.nnet.softmax(inputs)
class Linear(Layer):
def __init__(self, name, inputs_dim, num_output, init_std):
super(Linear, self).__init__(name, trainable=True)
self.W = sharedX(np.random.randn(inputs_dim, num_output) * init_std, name=name + '/W')
self.b = sharedX(np.zeros((num_output)), name=name + '/b')
def forward(self, inputs):
# Your codes here
batch_size = inputs.shape[0]
n = T.prod(inputs.shape) / inputs.shape[0]
inputs = T.reshape(inputs, [batch_size, n])
return T.dot(inputs, self.W) + self.b
def params(self):
return [self.W, self.b]
class Convolution(Layer):
def __init__(self, name, kernel_size, num_input, num_output, init_std):
super(Convolution, self).__init__(name, trainable=True)
W_shape = (num_output, num_input, kernel_size, kernel_size)
self.W = sharedX(np.random.randn(*W_shape) * init_std, name=name + '/W')
self.b = sharedX(np.zeros((num_output)), name=name + '/b')
def forward(self, inputs):
# Your codes here
result = conv2d(input=inputs, filters=self.W, border_mode='valid')
s0, s2, s3 = result.shape[0], result.shape[2], result.shape[3]
result += T.repeat(T.repeat(T.repeat(T.reshape(
self.b, [1, self.b.shape[0], 1, 1]), s0, 0), s2, 2), s3, 3)
return result
def params(self):
return [self.W, self.b]
# return [self.W]
class Pooling(Layer):
def __init__(self, name, kernel_size):
super(Pooling, self).__init__(name)
self.kernel_size = kernel_size
def forward(self, inputs):
# Your coders here
return pool_2d(inputs,
ds=(self.kernel_size, self.kernel_size),
ignore_border=True,
mode='max')
```
#### File: a1exwang/theano-cnn-intro/optimizer.py
```python
import theano.tensor as T
from utils import sharedX
class SGDOptimizer(object):
def __init__(self, learning_rate, weight_decay=0.005, momentum=0.9):
self.lr = learning_rate
self.wd = weight_decay
self.mm = momentum
def get_updates(self, cost, params):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
d = sharedX(p.get_value() * 0.0)
new_d = self.mm * d - self.lr * (g + self.wd * p)
updates.append((d, new_d))
updates.append((p, p + new_d))
return updates
class AdagradOptimizer(object):
def __init__(self, learning_rate, eps=1e-8):
self.lr = learning_rate
self.eps = eps
def get_updates(self, cost, params):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, dx in zip(params, grads):
            # keep a per-parameter cache and accumulate the squared gradient element-wise
            cache = sharedX(p.get_value() * 0.0)
            new_cache = cache + dx * dx
            new_d = self.lr * dx / (T.sqrt(new_cache) + self.eps)
            updates.append((cache, new_cache))
            updates.append((p, p - new_d))
return updates
```
#### File: a1exwang/theano-cnn-intro/solve_net.py
```python
from utils import LOG_INFO
import numpy as np
def data_iterator(x, y, batch_size, shuffle=True):
indx = range(len(x))
if shuffle:
np.random.shuffle(indx)
for start_idx in range(0, len(x), batch_size):
end_idx = min(start_idx + batch_size, len(x))
yield x[start_idx: end_idx], y[start_idx: end_idx]
def solve_net(model, train_x, train_y, test_x, test_y,
batch_size, max_epoch, disp_freq, test_freq):
iter_counter = 0
loss_list = []
accuracy_list = []
test_acc = []
test_loss = []
for k in range(max_epoch):
for x, y in data_iterator(train_x, train_y, batch_size):
iter_counter += 1
loss, accuracy = model.train(x, y)
loss_list.append(loss)
accuracy_list.append(accuracy)
if iter_counter % disp_freq == 0:
msg = 'Training iter %d, mean loss %.5f (batch loss %.5f), mean acc %.5f' % (iter_counter,
np.mean(loss_list),
loss_list[-1],
np.mean(accuracy_list))
LOG_INFO(msg)
loss_list = []
accuracy_list = []
if iter_counter % test_freq == 0:
LOG_INFO(' Testing...')
for tx, ty in data_iterator(test_x, test_y, batch_size, shuffle=False):
t_accuracy, t_loss = model.test(tx, ty)
test_acc.append(t_accuracy)
test_loss.append(t_loss)
msg = ' Testing iter %d, mean loss %.5f, mean acc %.5f' % (iter_counter,
np.mean(test_loss),
np.mean(test_acc))
LOG_INFO(msg)
test_acc = []
test_loss = []
```
#### File: a1exwang/theano-cnn-intro/utils.py
```python
import theano
import numpy as np
from datetime import datetime
def sharedX(X, name=None):
return theano.shared(
np.asarray(X, dtype=theano.config.floatX),
name=name,
borrow=True)
def LOG_INFO(msg):
now = datetime.now()
display_now = str(now).split(' ')[1][:-3]
print display_now + ' ' + msg
``` |
{
"source": "a1exwang/zeus",
"score": 3
} |
#### File: a1exwang/zeus/check_task.py
```python
import sys
import sqlite3
import binascii
import re
def check_task(task_db, info_hash_or_uri):
m = re.match(r'urn:btih:(\w+)', info_hash_or_uri)
if m:
        info_hash = m.group(1)
else:
info_hash = info_hash_or_uri
conn = sqlite3.connect(task_db)
c = conn.cursor()
bin_info_hash = binascii.unhexlify(info_hash)
rows = c.execute(
"SELECT COUNT(*) FROM BtTask WHERE InfoId = ?;", (bin_info_hash,)
)
    (_n,) = rows.fetchone()
    if int(_n) > 0:
        return 0
c.close()
return 1
if __name__ == '__main__':
if len(sys.argv) != 3:
print("USAGE python ./check_task.py TaskDb.dat INFO_HASH_OR_URI")
sys.exit(2)
task_db, info_hash_or_uri = sys.argv[1:3]
sys.exit(check_task(task_db, info_hash_or_uri))
``` |
{
"source": "a1falcon/blincodes",
"score": 3
} |
#### File: blincodes/codes/tools.py
```python
from blincodes import matrix, vector
def make_generator(mat):
"""Return the generator matrix from general matrix `mat`."""
return matrix.Matrix(
(row.value for row in mat.diagonal_form if row.value),
mat.ncolumns)
def make_parity_check(mat):
"""Return the parity-check matrix from generator matrix `mat`."""
return mat.orthogonal
def hadamard_product(generator_a, generator_b):
"""Evaluate the generator matrix of Hadamard product code.
:param: Matrix generator_a - the generator matrix of the first code;
:param: Matrix generator_b - the generator matrix of the second code.
:return: Matrix generator - the generator matrix of Hadamard product of
the first and the second codes.
"""
hadamard_dict = {} # {index of the fist 1 in the row: row}
hadamard = []
for row_a in generator_a:
for row_b in generator_b:
row = row_a * row_b
test_row = row.copy()
for i, row_h in hadamard_dict.items():
if test_row[i]:
test_row += row_h
if test_row.value:
hadamard_dict[test_row.support[0]] = test_row
hadamard.append(row)
return matrix.from_vectors(hadamard)
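# Illustrative usage sketch (added, hedged): assuming the matrix.Matrix(rows, ncolumns)
# constructor used elsewhere in this module, the Hadamard product code of two codes is
# generated from all coordinate-wise products of their generator rows, e.g.:
#   g_a = matrix.Matrix([0b1100, 0b0011], 4)
#   g_b = matrix.Matrix([0b1010, 0b0101], 4)
#   g_prod = hadamard_product(g_a, g_b)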
def intersection(generator_a, generator_b):
"""Return generator matrix of intersection of two codes."""
return make_parity_check(matrix.concatenate(
make_parity_check(generator_a),
make_parity_check(generator_b), by_rows=True))
def union(generator_a, generator_b):
"""Return generator matrix of union of two codes."""
return make_generator(matrix.concatenate(
generator_a, generator_b, by_rows=True))
def puncture(generator, columns=None, remove_zeroes=False):
"""Return generator matrix of punctured code.
Punctured code is code obtaining by set the positions
with indexes from `ncolumns` of every codeword to zero.
Punctured code is NOT subcode of original code!
"""
if not columns:
columns = []
mask = vector.from_support_supplement(generator.ncolumns, columns)
puncture_matrix = matrix.Matrix(
((row * mask).value for row in generator),
generator.ncolumns).diagonal_form
if remove_zeroes:
return matrix.Matrix(
(row.value for row in puncture_matrix if row.value),
generator.ncolumns).submatrix(
columns=(i for i in range(generator.ncolumns)
if i not in columns))
return matrix.Matrix(
(row.value for row in puncture_matrix if row.value),
generator.ncolumns)
def truncate(generator, columns=None, remove_zeroes=False):
"""Return generator matrix of truncated code.
Truncated code is code obtaining by choose codewords which
have coordinates with indexes from `columns` is zero.
Unlike the punctured code truncated code is a subcode of original code.
NOTE! If remove_zeroes is set to True the truncated codes would not be
a subcode of the original code.
"""
if not columns:
columns = []
mask = vector.from_support(generator.ncolumns, columns)
trunc = matrix.Matrix(
(row.value for row in generator.gaussian_elimination(columns)
if not (row * mask).value),
generator.ncolumns).echelon_form
trunc = matrix.Matrix((row.value for row in trunc if row.value),
generator.ncolumns)
if remove_zeroes:
return trunc.submatrix(
columns=(i for i in range(generator.ncolumns)
if i not in columns))
return trunc
def hull(generator):
"""Evaluate the generator matrix of the code's hull.
The code's hull is intersection of code and it's dual.
"""
return make_parity_check(
matrix.concatenate(generator,
make_parity_check(generator),
by_rows=True))
def iter_codewords(generator):
"""Iterate over all codewords of code."""
for i in range(1 << generator.nrows):
yield (matrix.Matrix([i], generator.nrows) * generator)[0]
def spectrum(generator):
"""Return the spectrum of code."""
spec = {i: 0 for i in range(generator.ncolumns + 1)}
for vec in iter_codewords(generator):
spec[vec.hamming_weight] += 1
return spec
def encode(generator, vec):
"""Encode the `vec` using generator matrix `generator` of code."""
try:
return (matrix.from_vectors([vec]) * generator)[0]
except TypeError:
pass
except IndexError:
return None
try:
return (vec * generator)[0]
except IndexError:
pass
return None
def syndrome(parity_check, vec):
"""Return the syndrome of `vec` using parity check matrix."""
try:
return (parity_check * matrix.from_vectors([vec]).T).T[0]
except TypeError:
pass
except IndexError:
return None
try:
return (parity_check * vec.T).T[0]
except IndexError:
pass
return None
``` |
{
"source": "a1fonsofuentes/Tareas",
"score": 4
} |
#### File: a1fonsofuentes/Tareas/debugging.py
```python
def divisores(num):
divisores = []
for i in range(1, num + 1):
if num % i == 0:
divisores.append(i)
return divisores
def run():
num = int(input("Ingresa un número: "))
print(divisores(num))
print("Finalizó el programa")
if __name__ == "__main__":
run()
``` |
{
"source": "a1fred/carnival",
"score": 2
} |
#### File: carnival/carnival/cli.py
```python
import os
import sys
import typing
import collections
import click
import colorama # type: ignore
from colorama import Fore, Style
from carnival.tasks_loader import get_tasks
if typing.TYPE_CHECKING:
from carnival.task import TaskBase
carnival_tasks_module = os.getenv("CARNIVAL_TASKS_MODULE", "carnival_tasks")
def is_completion_script(complete_var: str) -> bool:
return os.getenv(complete_var, None) is not None
task_types: typing.OrderedDict[str, typing.Type["TaskBase"]] = collections.OrderedDict()
def except_hook(type: typing.Type[typing.Any], value: typing.Any, traceback: typing.Any) -> None:
print(f"{Fore.RED}{type.__name__}: {value} {Fore.RESET}\nYou can use --debug flag to see full traceback.")
def main() -> int:
"""
>>> $ poetry run python -m carnival --help
>>> Usage: python -m carnival [OPTIONS] {help|test}...
>>> Options:
>>> --debug Turn on debug mode
>>> --no_validate Disable step validation
>>> --help Show this message and exit.
"""
global task_types
complete_var = os.getenv("COMPLETE_VAR", "_CARNIVAL_COMPLETE")
task_types = get_tasks(
carnival_tasks_module=carnival_tasks_module,
for_completion=is_completion_script(complete_var)
)
@click.command()
@click.option('--debug', is_flag=True, default=False, help="Turn on debug mode")
@click.option('--no-validate', is_flag=True, default=False, help="Disable step validation")
@click.argument('tasks', required=True, type=click.Choice(list(task_types.keys())), nargs=-1)
def cli(debug: bool, no_validate: bool, tasks: typing.Iterable[str]) -> int:
colorama.init()
if debug is True:
print(f"Debug mode {Style.BRIGHT}{Fore.YELLOW}ON{Fore.RESET}{Style.RESET_ALL}")
else:
sys.excepthook = except_hook
if no_validate:
print(f"Step validation {Style.BRIGHT}{Fore.YELLOW}OFF{Fore.RESET}{Style.RESET_ALL}")
# Build chain and validate
has_errors = False
task_chain: typing.List["TaskBase"] = []
for task_class_str in tasks:
task = task_types[task_class_str](no_validate=no_validate)
is_valid = task.validate()
if is_valid is False:
has_errors = True
task_chain.append(task)
if has_errors:
return 1
# Run
for task in task_chain:
task.run()
return 0
return cli(complete_var=complete_var) # type: ignore
``` |
{
"source": "a1fred/commands",
"score": 3
} |
#### File: a1fred/commands/example.py
```python
import os
from management_commands import Command, main
class Ls(Command):
def add_arguments(self, parser):
parser.add_argument('-1', action='store_true', dest='onecol')
parser.add_argument('path')
def handle(self, onecol, path, **kwargs) -> None:
sep = ', '
if onecol:
sep = '\n'
print(sep.join(os.listdir(path)))
if __name__ == '__main__':
main(commands=[Ls()])
``` |
{
"source": "a1fred/cryptoportfolio",
"score": 3
} |
#### File: interfaces/exchanges/bittrex.py
```python
from decimal import Decimal
from cryptoportfolio.lib.bittrex import Bittrex, API_V2_0
from cryptoportfolio.interfaces.base import Address
class BittrexWallet(Address):
decimal_places = 18
__api_response = None
def __init__(self, api_key, api_secret, **kwargs):
self.bittrex = Bittrex(api_key, api_secret, api_version=API_V2_0)
super(BittrexWallet, self).__init__(**kwargs)
def balance_request(self):
resp = self.bittrex.get_balances()
if 'result' in resp and resp['result']:
for curr in resp['result']:
symbol = curr['Currency']['Currency']
balance = Decimal(curr['Balance']['Balance'])
if balance != 0.0:
yield (symbol, balance)
def _get_addr_coins_and_tokens_balance(self):
for balance_item in self.balance_request():
yield balance_item
```
#### File: interfaces/miningpools/f2pool.py
```python
from decimal import Decimal
import requests
from cryptoportfolio.interfaces.base import Address
class F2PoolWallet(Address):
decimal_places = 18
symbol = None
f2pool_currecnices_mapping = {
'bitcoin': "BTC",
'litecoin': "LTC",
'etc': "ETC",
'eth': "ETH",
'zec': "ZEC",
'sc': "SC",
'monero': "XMR",
'dash': "DASH",
}
def __init__(self, currency, user, **kwargs):
assert currency in self.f2pool_currecnices_mapping.keys()
self.symbol = self.f2pool_currecnices_mapping[currency]
self.currency = currency
self.user = user
super(F2PoolWallet, self).__init__(**kwargs)
def _get_addr_coins_and_tokens_balance(self):
result = requests.get("http://api.f2pool.com/%s/%s" % (self.currency, self.user)).json()
return [
(self.symbol, Decimal(result['balance']))
]
```
#### File: interfaces/wallets/cardano.py
```python
from decimal import Decimal
import requests
from cryptoportfolio.interfaces.base import CryptoCoinWallet
class CardanoWallet(CryptoCoinWallet):
decimal_places = 18
def _get_addr_coins_and_tokens_balance(self):
balance_data = requests.get("https://cardanoexplorer.com/api/addresses/summary/%s" % self.addr).json()
balance = balance_data['Right']['caBalance']['getCoin']
return [
("ADA", Decimal(balance) / Decimal(1000000)),
]
```
#### File: interfaces/wallets/hyperstake.py
```python
import requests
from decimal import Decimal
from cryptoportfolio.interfaces.base import CryptoCoinWallet
class HyperstakeWallet(CryptoCoinWallet):
"""
Uses undocumented request
"""
decimal_places = 18
def _get_addr_coins_and_tokens_balance(self):
data = requests.get(
"https://prohashing.com/explorerJson/getAddress?address=%s&coin_id=184" % self.addr
).json()
return [
("HYP", Decimal(data['balance'])),
]
```
#### File: interfaces/wallets/magi.py
```python
from decimal import Decimal
import requests
from cryptoportfolio.interfaces.base import CryptoCoinWallet
class MagiWallet(CryptoCoinWallet):
decimal_places = 18
def _get_addr_coins_and_tokens_balance(self):
# type: (...) -> list
balance = requests.get(
"https://chainz.cryptoid.info/xmg/api.dws?q=getbalance&a=%s" % self.addr
).text
return [
("XMG", Decimal(balance))
]
```
#### File: interfaces/wallets/nem.py
```python
import requests
from decimal import Decimal
from cryptoportfolio.interfaces.base import CryptoCoinWallet
class NemWallet(CryptoCoinWallet):
"""
Uses undocumented request
"""
decimal_places = 18
def _get_addr_coins_and_tokens_balance(self):
data = requests.post(
"http://explorer.ournem.com/account/detail",
headers={
"Content-Type": "application/json",
},
data='{"address": "%s"}' % str(self.addr).replace('-', '')
).json()
if data == 'Not Found':
return [
("XEM", Decimal("0.0")),
]
return [
("XEM", Decimal(data['balance']) * Decimal("0.000001")),
]
```
#### File: lib/convert/cryptocompare.py
```python
import requests
from decimal import Decimal
def get_price_usd(symbol):
"""
:type symbol: str
"""
data = requests.get("https://min-api.cryptocompare.com/data/pricemulti?fsyms=%s&tsyms=USD" % symbol.upper()).json()
if symbol.upper() in data:
return Decimal(data[symbol.upper()]['USD'])
else:
return Decimal('0')
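# Example (performs a network request to cryptocompare, so the result varies):
#   get_price_usd("btc") -> Decimal USD price, or Decimal('0') for unknown symbols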
```
#### File: cryptoportfolio/cryptoportfolio/main.py
```python
import sys
from yaml import BaseLoader
from decimal import Decimal
from cryptoportfolio.cli.printers import (
result_iterator,
summarize_cells,
hide_zeros_cells,
hide_usd_zeros_cells,
sort_cells,
print_results,
)
from cryptoportfolio.lib.convert import CONVERTORS_MAPPING
def main(settings_path, summarize, hide_zeros, hide_usd_zeros, sort, print_all_total, print_group_total):
import yaml
settings = yaml.load(settings_path, BaseLoader)
settings_path.close()
try:
defaults = settings.get('defaults', {})
groups = settings.get('groups', {})
tickers = settings.get('tickers', {})
converting = settings.get('converting', 'coinmarketcap')
except AttributeError:
# AttributeError: 'str' object has no attribute 'get' raises if wrong file type
print("Wrong configuration file type")
return
if converting not in CONVERTORS_MAPPING:
print("Unknown convetror %s, possible values are: %s" % (
converting,
"|".join(CONVERTORS_MAPPING.keys())
))
sys.exit(1)
else:
get_price_usd = CONVERTORS_MAPPING[converting]
if not groups and not tickers:
print("No groups and no tickers is defined. Exiting.")
results = result_iterator(groups.items(), defaults, get_price_usd)
if summarize:
results = summarize_cells(results)
if hide_zeros:
results = hide_zeros_cells(results)
if hide_usd_zeros:
results = hide_usd_zeros_cells(results)
if sort:
results = sort_cells(results)
print_results(results, print_all_total=print_all_total, print_group_total=print_group_total)
if tickers:
print("\nTickers:")
for symbol in tickers:
print(" * %-4s $%s" % (symbol, get_price_usd(symbol).quantize(Decimal('0.00'))))
def cli():
import argparse
    parser = argparse.ArgumentParser(description='Show cryptocoins portfolio.')
parser.add_argument('settings_path', type=argparse.FileType('r'))
parser.add_argument('-s', '--summarize', action='store_true', help="Summarize same currencies in one row")
parser.add_argument('-z', '--hide-zeros', action='store_true', help="Hide zero crypto balances")
parser.add_argument('--sort', action='store_true', help="Sort by USD balance")
parser.add_argument('--hide-usd-zeros', action='store_true', help="Hide zero USD balances")
parser.add_argument('-T', '--print-all-total', action='store_true', help="Print all total USD")
parser.add_argument('-t', '--print-group-total', action='store_true', help="Print group total USD")
args = parser.parse_args()
main(**vars(args))
```
#### File: cryptoportfolio/tests/test_cli.py
```python
import unittest
class CliTests(unittest.TestCase):
test_conf = './sample.yml'
def test_flags(self):
from cryptoportfolio.main import main
main(
open(self.test_conf, 'r'),
summarize=True,
hide_zeros=True,
hide_usd_zeros=True,
sort=True,
print_all_total=True,
print_group_total=True,
)
``` |
{
"source": "a1fred/django-db-logger",
"score": 2
} |
#### File: django-db-logger/indb_logger/models.py
```python
from django.db import models
class DBLogEntry(models.Model):
time = models.DateTimeField(auto_now_add=True)
level = models.CharField(max_length=10)
message = models.TextField()
def __str__(self):
return str(self.time.strftime("%d.%B.%Y %H:%M"))+" "+str(self.level)
``` |
{
"source": "a1fred/git-barry",
"score": 3
} |
#### File: git-barry/gitbarry/main.py
```python
import sys
from gitbarry.utils.git import assert_is_git_repo
from gitbarry.reasons import REASONS
def usage(need_exit=True):
print("\nUsage:")
print("git barry %s" % '|'.join(REASONS.keys()))
if need_exit:
sys.exit(0)
def main(reason, *args):
if reason not in REASONS.keys():
print("Available reasons are: %s" % ", ".join(REASONS.keys()))
usage()
reason_inst = REASONS[reason].Reason(*args)
errors = reason_inst.validate()
if len(errors):
for err in errors:
print(" - %s" % err)
sys.exit(7)
else:
reason_inst.run()
def run():
if len(sys.argv[1:]) < 1:
usage()
main(*sys.argv[1:])
if __name__ == "__main__":
assert_is_git_repo()
run()
```
#### File: gitbarry/reasons/finish.py
```python
from .abstract import AbstractReason
from gitbarry.config import settings
from gitbarry.utils import git, shortcuts
class Reason(AbstractReason):
def usage(self):
print("git barry finish options:")
print(" git barry finish")
def validate(self):
if len(self.args) != 0:
return ['use git barry start help to see options.']
return []
def run(self):
shortcuts.ensure_current_branch_is_taskbranch()
task_type = shortcuts.get_current_task_type()
task_params = settings["TASKS"][task_type]
finish_action = task_params['finish-action']
action_module = settings['FINISH_ACTIONS'][finish_action]
action = action_module.Action(task_params)
action.run()
switch_to = task_params.get('finish-branch', task_params['branch-from'])
git.swith_to_branch(switch_to)
print("Done")
```
#### File: gitbarry/reasons/start.py
```python
from .abstract import AbstractReason
from gitbarry.config import settings
from gitbarry.utils import git
tasks = settings['TASKS'].keys()
class Reason(AbstractReason):
def usage(self):
print("git barry start options:")
print(" git barry start %s <feature_name>" % "|".join(tasks))
def validate(self):
if len(self.args) != 2:
return ['use "git barry start help" to see options.']
task, name = self.args
errors = []
if task not in tasks:
errors.append("Unknown task for reason 'start': %s" % task)
return errors
def run(self):
task, name = self.args
task_params = settings["TASKS"][task]
new_branch_name = '/'.join([task, name])
git.swith_to_branch(task_params['branch-from'])
git.create_new_branch(new_branch_name)
git.merge(task_params['branch-from'])
print("Done")
```
#### File: gitbarry/utils/git.py
```python
import os
import sys
import sh
def assert_is_git_repo():
try:
assert os.path.isdir('.git')
assert os.path.isfile('.git/HEAD')
except AssertionError:
print("This is not git repository")
sys.exit(2)
def get_current_branch() -> str:
current_branch = sh.git('rev-parse', '--abbrev-ref', 'HEAD')
return current_branch.strip()
def get_local_branches() -> list:
branches = []
branches_raw = sh.git("for-each-ref", "--format='%(refname)'", "refs/heads/")
for branch_name_long in branches_raw.split('\n'):
if branch_name_long:
branches.append(branch_name_long.replace('refs/heads/', '').replace("'", ""))
return branches
def ensure_branch_not_exists(branch_name: str):
if branch_name in get_local_branches():
print("branch %s already exists!" % branch_name)
sys.exit(4)
def ensure_branch_exists(branch_name):
if branch_name not in get_local_branches():
print("branch %s not exists!" % branch_name)
sys.exit(4)
def create_new_branch(branch_name: str, push_to_origin=True):
ensure_branch_not_exists(branch_name)
print("Creating new branch %s" % branch_name)
sh.git("checkout", '-b', branch_name)
if push_to_origin:
sh.git("push", "-u", "origin", branch_name)
print("Local branch pushed to origin")
def swith_to_branch(branch_name: str):
ensure_branch_exists(branch_name)
sh.git('checkout', branch_name)
    try:
        assert get_current_branch() == branch_name
    except AssertionError:
        print("Can't checkout branch: %s is not %s" % (get_current_branch(), branch_name))
def merge(from_branch):
ensure_branch_exists(from_branch)
output = sh.git("merge", from_branch)
print(output)
def delete_branch(branch_name):
ensure_branch_exists(branch_name)
output = sh.git("branch", '-D', branch_name)
print(output)
def tag(tagname):
output = sh.git("tag", "-f", tagname)
print(output)
```
#### File: gitbarry/utils/shortcuts.py
```python
import sys
import importlib
from .git import get_current_branch
def import_from_str(module: str):
return importlib.import_module(module)
def get_current_task_type():
from gitbarry.config import settings
current_branch = get_current_branch()
for task_prefix in settings['TASKS'].keys():
if current_branch.startswith('%s/' % task_prefix):
return task_prefix
return False
def ensure_current_branch_is_taskbranch():
current_task_type = get_current_task_type()
if current_task_type is False:
print("Current branch not looks like barry task branch.")
sys.exit(5)
``` |
{
"source": "a1fred/guitar_gammas",
"score": 3
} |
#### File: harmony_tools/core/colors.py
```python
COLOR_BLUE = '\033[0;34m'
COLOR_GREEN = '\033[0;32m'
COLOR_CYAN = '\033[0;36m'
COLOR_RED = '\033[0;31m'
COLOR_PURPLE = '\033[0;35m'
COLOR_BROWN = '\033[0;33m'
COLOR_YELLOW = '\033[1;33m'
COLOR_GRAY = '\033[1;30m'
COLOR_RESET = '\033[0m'
FG_COLORS = [
# COLOR_BLUE,
COLOR_GREEN,
# COLOR_CYAN,
# COLOR_RED,
# COLOR_PURPLE,
# COLOR_BROWN,
# COLOR_YELLOW,
]
def next_color(color):
assert color in FG_COLORS
index = FG_COLORS.index(color)
index += 1
try:
return FG_COLORS[index]
except IndexError:
index = 0
return FG_COLORS[index]
def c(string, color):
global COLOR_RESET
return f"{color}{string}{COLOR_RESET}"
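# Usage sketch: with only COLOR_GREEN enabled in FG_COLORS, next_color() simply
# wraps around; c("text", COLOR_GREEN) -> '\033[0;32mtext\033[0m'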
```
#### File: a1fred/guitar_gammas/setup.py
```python
from setuptools import setup, find_packages
from setuptools.command.test import test as test_cmd
from os import path
version = '0.1'
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
class Test(test_cmd):
def run_tests(self):
import coverage
cov = coverage.Coverage()
cov.start()
res = super().run_tests()
cov.stop()
cov.report()
cov.html_report()
cov.xml_report()
return res
setup(
name='harmony_tools',
version=version,
packages=find_packages(),
package_dir={'harmony_tools': 'harmony_tools'},
entry_points={
'console_scripts': ['harmony_tools=harmony_tools.cli:main'],
},
license='MIT',
url='https://github.com/a1fred/harmony_tools',
author='a1fred',
author_email='<EMAIL>',
classifiers=[
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3.7',
],
test_suite="tests",
cmdclass={
'test': Test,
},
tests_require=[
'coverage'
],
long_description=long_description,
long_description_content_type='text/markdown',
)
```
#### File: guitar_gammas/tests/test_core.py
```python
import unittest
from harmony_tools.core import note_operations
from harmony_tools import notes as n
class TestCore(unittest.TestCase):
def test_note_operations(self):
self.assertEqual(note_operations.half_tone(n.A), n.A_sharp)
self.assertEqual(note_operations.tone(n.A), n.B)
self.assertEqual(note_operations.tone_and_half(n.C), n.D_sharp)
``` |
{
"source": "A1fus/programmingWithPython",
"score": 4
} |
#### File: programmingWithPython/week4-class/binarySearch.py
```python
def load_a_list_of_ints():
"""
Loads a list of integers by first asking for its length.
"""
L = list()
n = int(input("The number of list elements: "))
for k in range(n):
L.append(int(input(str(k+1) + ". element: ")))
return L
def search(L, el):
"""
    Returns an index of `el` in the sorted list `L` if it exists (the last
    occurrence when `el` appears more than once), or `None` otherwise.
"""
# We're observing the part of the list between indices `left` and `right`.
# When we start, it's the whole list, so from `left=0` to `right=len(L)-1`.
left = 0
right = len(L)-1
# The length of the observed part of the list is `right-left+1`, so
# the list is NOT empty as long as `right-left+1 > 0`, i.e.,
# the list is NOT empty as long as `right>left-1`, i.e.,
# the list is NOT empty as long as `right>=left`.
while right >= left:
mid = (left + right) // 2
if L[mid] == el:
for i in range(right, mid, -1):
if L[i] == el:
return i
return mid
if L[mid] < el:
left = mid + 1
else:
right = mid - 1
return None
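# Example (input must be sorted): search([1, 3, 5, 7], 5) -> 2
# With duplicates the last occurrence wins: search([1, 2, 2, 3], 2) -> 2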
def main():
x = load_a_list_of_ints()
y = int(input("Value to be found: "))
print(search(x, y))
main()
```
#### File: programmingWithPython/week5-class/stringMod.py
```python
def char_mod(c, c_del, c_double):
"""Takes a character `c` and either doubles or removes it"""
if c == c_del:
return ""
if c == c_double:
c = c+c
return c
return c
def del_double(s, char_del=None, char_double=None):
"""Takes a string, removes all the occurences of `char_del`, doubles all
occurences of `char_double` and returns a new string.
Parameters
----------
s : string
String to be modified.
char_del : string
Character to be deleted.
char_double : string
Character to be doubled.
Returns
-------
s_mod : string
`s` after the characters have been removed and doubled.
"""
# Initialisations
s_list = list(s)
s_mod = ""
if char_del != None and len(char_del) != 1:
return None
if char_double != None and len(char_double) != 1:
return None
for i in range(len(s_list)):
s_mod = s_mod + char_mod(s_list[i], char_del, char_double)
return s_mod
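# Example: del_double("abcdefghb", "d", "b") -> "abbcefghbb"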
def main():
x = "abcdefghb"
de = "d"
do = "b"
print(del_double(x, de, do))
main()
```
#### File: programmingWithPython/week5-class/timeDiff.py
```python
def str2time(s):
"""Takes a string of the format "hh:mm:ss" and converts it to an integer
value of the seconds since the start of the day.
Parameters
----------
s : str
Time, in the format "hh:mm:ss".
Returns
-------
t : int
Time, in the seconds since the start of the day.
"""
#Initialisations
s_list = list(s)
for i in range(len(s_list)):
if s_list[i] == ":":
continue
s_list[i] = int(s_list[i])
t = ((s_list[0]*10 + s_list[1])*3600 + (s_list[3]*10 + s_list[4])*60
+ s_list[6]*10 + s_list[7])
return t
def time2str(t):
"""Takes a value of time as a number of seconds and returns it in the form
"hh:mm:ss".
Parameters
----------
t : int
Time in seconds.
Returns
-------
s : str
Time in the form "hh:mm:ss". Hours omitted if there are none.
"""
hours = t // 3600
min_sec = t % 3600
mins = min_sec // 60
sec = min_sec % 60
    if hours < 10:
        hours = "0" + str(hours)
    else:
        hours = str(hours)
if mins < 10:
mins = "0" + str(mins)
else:
mins = str(mins)
if sec < 10:
sec = "0" + str(sec)
else:
sec = str(sec)
if hours == "00":
s = "" + mins + ":" + sec
else:
s = "" + hours + ":" + mins + ":" + sec
return s
def time_diff(t1, t2):
"""Takes 2 times and returns the difference.
Parameters
----------
t1 : str
Time in the form 'hh:mm:ss'.
t2 : str
Time in the form 'hh:mm:ss'.
Returns
-------
time_diff : str
The difference between `t1` and `t2` in the form 'hh:mm:ss'.
"""""
time_diff = time2str(abs(str2time(t1) - str2time(t2)))
return time_diff
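# Example: time_diff("17:19:23", "17:11:07") -> "08:16" (a 496-second difference)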
def main():
print(time_diff("17:19:23", "17:11:07"))
main()
```
#### File: programmingWithPython/week6/firstPlusLastPrime.py
```python
def first_prime(L):
"""Returns the first prime in the list L.
Parameters
----------
L : list
        A list of integers.
Returns
-------
firstPrime : int
The first prime in the list L.
"""
for i in range(len(L)):
if L[i] <= 1:
continue
for j in range(2, L[i]):
if L[i] % j == 0:
break
else:
firstPrime = L[i]
return firstPrime
else:
return 0
def last_prime(L):
"""Returns the last prime in the list L.
Parameters
----------
L : list
        A list of integers.
Returns
-------
lastPrime : int
        The last prime in the list L.
"""
    for i in range(len(L) - 1, -1, -1):  # include index 0 so the first element is also checked
if L[i] <= 1:
continue
for j in range(2, L[i]):
if L[i] % j == 0:
break
else:
lastPrime = L[i]
return lastPrime
else:
return 0
def first_plus_last_prime(L):
"""Adds together the first and last primes in the list `l`. Returns
0 if there are no primes in the list, if there is only one, the
function returns double its value.
Parameters
----------
L : list
The list of integers.
Returns
-------
primeSum : int
Sum of the first and last primes in `L`.
"""
primeSum = first_prime(L) + last_prime(L)
return primeSum
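# Example: first_plus_last_prime([12, 12, 13, 15, 16, 16]) -> 26 (13 + 13)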
def main():
x = [12, 12, 13, 15, 16, 16]
print(first_plus_last_prime(x))
main()
``` |
{
"source": "a1ien/sol2",
"score": 2
} |
#### File: a1ien/sol2/bootstrap.py
```python
import ninja_syntax
import os, sys, glob, re
import itertools
import argparse
import urllib.request
# utilities
def flags(*args):
return ' '.join(itertools.chain(*args))
def includes(l):
return ['-I"{}"'.format(x) for x in l]
def library_includes(l):
return ['-L"{}"'.format(x) for x in l]
def libraries(l):
return ['-l{}'.format(x) for x in l]
def dependencies(l):
return ['-isystem"{}"'.format(x) for x in l]
def object_file(f):
(root, ext) = os.path.splitext(f)
return os.path.join(objdir, root + '.o')
def replace_extension(f, e):
(root, ext) = os.path.splitext(f)
return root + e
# Default install dir
install_dir = os.path.join('/usr', 'include') if 'linux' in sys.platform else 'include'
# Compiler: Read from environment or defaulted
cxx = os.environ.get('CXX', "g++")
# command line stuff
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', help='compile with debug flags')
parser.add_argument('--cxx', metavar='<compiler>', help='compiler name to use (default: env.CXX=%s)' % cxx, default=cxx)
parser.add_argument('--cxx-flags', help='additional flags passed to the compiler', default='')
parser.add_argument('--ci', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--testing', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--lua-version', help='Lua version, e.g. lua53', default='lua53')
parser.add_argument('--lua-lib', help='lua library name (without the lib on *nix).', default='lua')
parser.add_argument('--lua-dir', metavar='<dir>', help='directory lua is in with include and lib subdirectories')
parser.add_argument('--install-dir', metavar='<dir>', help='directory to install the headers to', default=install_dir);
parser.epilog = """In order to install sol, administrative privileges might be required.
Note that installation is done through the 'ninja install' command. To uninstall, the
command used is 'ninja uninstall'. The default installation directory for this
system is {}""".format(install_dir)
args = parser.parse_args()
# prepare paths and files
catch_file = os.path.join('external', 'Catch', 'include', 'catch.hpp')
os.makedirs(os.path.dirname(catch_file), exist_ok=True)
urllib.request.urlretrieve("https://github.com/catchorg/Catch2/releases/download/v2.0.1/catch.hpp", catch_file)
# general variables
include = [ '.', './include' ]
depends = [os.path.join('external', 'Catch', 'include')]
cxxflags = [ '-Wno-unknown-warning', '-Wno-unknown-warning-option', '-Wall', '-Wextra', '-Wpedantic', '-pedantic', '-pedantic-errors', '-Wno-noexcept-type', '-std=c++14', '-ftemplate-depth=1024' ]
cxxflags.extend([p for p in re.split("( |\\\".*?\\\"|'.*?')", args.cxx_flags) if p.strip()])
example_cxxflags = [ '-Wno-unknown-warning', '-Wno-unknown-warning-option', '-Wall', '-Wextra', '-Wpedantic', '-pedantic', '-pedantic-errors', '-Wno-noexcept-type', '-std=c++14', '-ftemplate-depth=1024' ]
example_cxxflags.extend([p for p in re.split("( |\\\".*?\\\"|'.*?')", args.cxx_flags) if p.strip()])
ldflags = []
script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
sol_dir = os.path.join(script_dir, 'sol')
sol_file = os.path.join(script_dir, 'sol.hpp')
copy_command = 'cp -rf {} $in && cp -f {} $in'.format(sol_dir, sol_file)
remove_command = 'rm -rf {} && rm -f {}'.format(os.path.join(args.install_dir, 'sol'), os.path.join(args.install_dir, 'sol.hpp'))
if sys.platform == 'win32':
copy_command = 'robocopy /COPYALL /E {} $in && robocopy /COPYALL {} $in'.format(sol_dir, sol_file)
remove_command = 'rmdir /S /Q {} && erase /F /S /Q /A {}'.format(os.path.join(args.install_dir, 'sol'),
os.path.join(args.install_dir, 'sol.hpp'))
if not args.lua_lib:
args.lua_lib = 'lua'
if args.debug:
cxxflags.extend(['-g', '-O0'])
else:
cxxflags.extend(['-DNDEBUG', '-O3'])
example_cxxflags.extend(['-g', '-O0'])
if args.lua_dir:
include.extend([os.path.join(args.lua_dir, 'include')])
ldflags.extend(library_includes([os.path.join(args.lua_dir, 'lib')]))
if 'linux' in sys.platform:
lua_version = os.environ.get('LUA_VERSION', args.lua_version)
if re.match(r'lua5[1-3]', lua_version):
# Using normal lua
lua_lib = lua_version[:-1] + '.' + lua_version[-1]
lua_incl = lua_lib
elif re.match(r'luajit5[1-3]:i386', lua_version):
# luajit:i386
lua_incl = 'luajit-2.0'
lua_lib = lua_version[:-7] + '-' + lua_version[-7] + '.' + lua_version[-6]
cxxflags.append('-m32')
include.extend(['/usr/include/luajit-2.0/', '/usr/local/include/luajit-2.0/'])
elif re.match(r'luajit5[1-3]', lua_version):
# luajit
lua_incl = 'luajit-2.0' # I don't get this..
lua_lib = lua_version[:-2] + '-' + lua_version[-2] + '.' + lua_version[-1]
include.extend(['/usr/include/luajit-2.0/', '/usr/local/include/luajit-2.0/'])
else:
        raise Exception('Unknown lua_version={}'.format(lua_version))
include.extend(['/usr/include/' + lua_incl, '/usr/local/include/' + lua_incl])
ldflags.extend(library_includes(['/usr/local/lib']))
ldflags.extend(libraries([lua_lib]))
elif 'darwin' in sys.platform:
# OSX
lua_version = os.environ.get('LUA_VERSION', args.lua_version)
if re.match(r'lua5[1-3]', lua_version):
# Using normal lua
lua_incl = lua_version[:-1] + '.' + lua_version[-1]
lua_lib = lua_version[:-2] + '.' + lua_version[-2] + '.' + lua_version[-1]
elif re.match(r'luajit', lua_version):
# luajit
lua_incl = 'luajit-2.0'
lua_lib = 'luajit'
ldflags.extend(['-pagezero_size 10000', '-image_base 100000000'])
elif re.match(r'luajit5[1-3]', lua_version):
# luajit
lua_incl = 'luajit-2.0'
lua_lib = lua_version[:-2] + '-' + lua_version[-2] + '.' + lua_version[-1]
ldflags.extend(['-pagezero_size 10000', '-image_base 100000000'])
else:
        raise Exception('Unknown lua_version={}'.format(lua_version))
depends.extend(['/usr/include/' + lua_incl, '/usr/local/include/' + lua_incl])
ldflags.extend(library_includes(['/usr/local/lib']))
ldflags.extend(libraries([lua_lib]))
else:
ldflags.extend(libraries([args.lua_lib]))
if args.testing:
cxxflags.append('-Wmissing-declarations')
if 'linux' in sys.platform:
cxxflags.append('-pthread')
ldflags.extend(libraries(['dl']))
builddir = 'bin'
objdir = 'obj'
if 'win32' in sys.platform:
tests = os.path.join(builddir, 'tests.exe')
else:
tests = os.path.join(builddir, 'tests')
tests_inputs = []
tests_object_files = []
for f in glob.glob('tests/test*.cpp'):
obj = object_file(f)
tests_inputs.append(f)
tests_object_files.append(obj)
examples = []
examples_input = []
def add_example (f):
if 'win32' in sys.platform:
example = os.path.join(builddir, replace_extension(f, '.exe'))
example = example.replace('/', '\\');
else:
example = os.path.join(builddir, replace_extension(f, ''))
example = example.replace('\\', '/');
#if ' ' in example:
# example = '"' + example + '"'
examples_input.append(f)
examples.append(example)
for f in glob.glob('examples/*.cpp'):
add_example(f)
for f in glob.glob('examples/tutorials/quick_n_dirty/**.cpp'):
add_example(f)
# ninja file
ninja = ninja_syntax.Writer(open('build.ninja', 'w'))
# variables
ninja.variable('ninja_required_version', '1.3')
ninja.variable('builddir', 'bin')
ninja.variable('cxx', args.cxx)
ninja.variable('cxxflags', flags(cxxflags + includes(include) + dependencies(depends)))
ninja.variable('example_cxxflags', flags(example_cxxflags + includes(include) + dependencies(depends)))
ninja.variable('ldflags', flags(ldflags))
ninja.newline()
# rules
ninja.rule('bootstrap', command = ' '.join(['python'] + sys.argv), generator = True)
ninja.rule('compile', command = '$cxx -MMD -MF $out.d -c $cxxflags -Werror $in -o $out',
deps = 'gcc', depfile = '$out.d',
description = 'compiling $in to $out')
ninja.rule('link', command = '$cxx $cxxflags $in -o $out $ldflags', description = 'creating $out')
ninja.rule('tests_runner', command = tests)
ninja.rule('examples_runner', command = 'cmd /c ' + (' && '.join(examples)) if 'win32' in sys.platform else ' && '.join(examples) )
ninja.rule('example', command = '$cxx $example_cxxflags -MMD -MF $out.d $in -o $out $ldflags',
deps = 'gcc', depfile = '$out.d',
description = 'compiling example $in to $out')
ninja.rule('installer', command = copy_command)
ninja.rule('uninstaller', command = remove_command)
ninja.newline()
# builds
ninja.build('build.ninja', 'bootstrap', implicit = sys.argv[0])
for obj, f in zip(tests_object_files, tests_inputs):
ninja.build(obj, 'compile', inputs = f)
for example, f in zip(examples, examples_input):
ninja.build(example, 'example', inputs = f)
ninja.build(tests, 'link', inputs = tests_object_files)
ninja.build('tests', 'phony', inputs = tests)
ninja.build('examples', 'phony', inputs = examples)
ninja.build('install', 'installer', inputs = args.install_dir)
ninja.build('uninstall', 'uninstaller')
ninja.build('run', 'tests_runner', implicit = 'tests')
ninja.build('run_examples', 'examples_runner', implicit = 'examples')
ninja.default('run run_examples')
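# Typical workflow (an assumption based on the rules above): run this script to
# generate build.ninja, then `ninja` builds and runs the default
# 'run run_examples' targets; `ninja install` / `ninja uninstall` use the
# installer/uninstaller rules defined earlier.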
``` |
{
"source": "a1k89/django-otp-provider",
"score": 2
} |
#### File: django-otp-provider/django_otp_provider/services.py
```python
from typing import Optional
from django.core.exceptions import ValidationError
from .models import Otp
from . import selectors as otp_sel
from .conf import conf
from .utils import send_code
@send_code
def generate_otp(key) -> Optional[Otp]:
"""
Generate or recreate otp.
"""
otp = otp_sel.get_otp_by_key(key=key)
    if otp is not None and otp.is_allow_recreate:
        # a recreatable code is deleted so a fresh one can be issued below
        otp.delete()
        otp = None
if otp is None:
return Otp.objects.create(key=key)
raise ValidationError(conf.ERROR_TEXT)
def verify_otp(key: str, token: str, code: str):
otp = otp_sel.get_otp_by_key(token=token, key=key, raise_exc=True)
if not otp.is_allow_new_attempt:
raise ValidationError(conf.ERROR_TEXT_ATTEMTPS)
if code != otp.code:
otp.attempts -= 1
otp.save()
raise ValidationError(conf.ERROR_TEXT_CODE)
else:
otp.delete()
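# Hedged usage sketch (the calling code and the `token` attribute are
# assumptions, not part of this module):
#   otp = generate_otp(key="+15550000000")   # creates the Otp; @send_code is assumed to deliver it
#   verify_otp(key="+15550000000", token=otp.token, code="1234")
# verify_otp raises ValidationError on a wrong code or when no attempts remain.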
``` |
{
"source": "a1k89/django-rest-sms-auth",
"score": 2
} |
#### File: django-rest-sms-auth/sms_auth/admin.py
```python
from django.contrib.admin import ModelAdmin, register
from .models import *
@register(SMSMessage)
class SMSMessageAdmin(ModelAdmin):
readonly_fields = (
"created",
"phone_number",
)
def has_add_permission(self, request):
return False
@register(PhoneCode)
class PhoneCodeAdmin(ModelAdmin):
readonly_fields = (
"valid_to",
"created_at",
)
```
#### File: sms_auth/api/mixins.py
```python
from rest_framework import status
from rest_framework.response import Response
from ..conf import conf
class Error:
def __init__(self, message):
self.message = message
def render(self):
error = {"code": 1000, "message": self.message, "errors": []}
return error
class ResponsesMixin:
def simple_text_response(self, message=None):
if message is None:
message = conf.SMS_REQUEST_SUCCESS
data = {"detail": message}
return Response(data, status=status.HTTP_200_OK)
def success_objects_response(self, data):
return Response(data, status=status.HTTP_200_OK)
def error_response(self, error_message):
error = error_message
if type(error_message) is str:
error = Error(error_message).render()
return Response(error, status=status.HTTP_400_BAD_REQUEST)
```
#### File: django-rest-sms-auth/sms_auth/listeners.py
```python
import uuid
from django.db.models.signals import post_save
from django.db.transaction import on_commit
from django.dispatch import receiver
from .models import PhoneCode
from .tasks import send_sms_async
@receiver(post_save, sender=PhoneCode, dispatch_uid=uuid.uuid4())
def phone_code_post_save(sender, instance, created, **kwargs):
if created:
on_commit(lambda: send_sms_async.delay(instance.pk))
```
#### File: sms_auth/models/sms.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth import get_user_model
from phonenumber_field.modelfields import PhoneNumberField
from ..utils import random_code, valid_to
class SMSMessage(models.Model):
"""
    Saves each sent SMS as a history record.
"""
created = models.DateTimeField(auto_now_add=True)
phone_number = models.CharField("Phone number", max_length=20)
def __str__(self):
return f"{self.phone_number} / {self.created}"
def __repr__(self):
return f"{self.phone_number}"
class Meta:
verbose_name = "Sms log"
verbose_name_plural = "Sms log"
class PhoneCode(models.Model):
"""
    Phone code instance saved for later validation.
"""
phone_number = PhoneNumberField(unique=True)
owner = models.ForeignKey(get_user_model(),
null=True,
on_delete=models.CASCADE)
code = models.PositiveIntegerField(default=random_code)
valid_to = models.DateTimeField(default=valid_to)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ("created_at",)
verbose_name = "Phone code"
verbose_name_plural = "Phone codes"
def __str__(self):
return f"{self.phone_number} ({self.code})"
def __repr__(self):
return self.__str__()
@property
def is_allow(self):
return timezone.now() >= self.valid_to
@property
def message(self) -> str:
return f"Your auth code: {self.code}"
def save(self, *args, **kwargs):
from ..conf import conf
pretendent = self.__class__.objects.filter(
phone_number=self.phone_number
).first()
if pretendent is not None:
self.pk = pretendent.pk
if conf.SMS_AUTH_DEBUG_PHONE_NUMBER is not None:
if self.phone_number == conf.SMS_AUTH_DEBUG_PHONE_NUMBER:
self.code = conf.SMS_DEBUG_CODE
super().save(*args, **kwargs)
```
#### File: sms_auth/providers/base.py
```python
from typing import Protocol
def sms_decorator(func, to):
from ..models import SMSMessage
def wrapper():
result = func()
if result:
SMSMessage.objects.create(phone_number=to)
return wrapper
class SMSProviderClass(Protocol):
to: str
message: str
conf: dict
def send_sms(self) -> None:
pass
class SMSProvider:
def __getattribute__(self, item):
element = super().__getattribute__(item)
if callable(element) and item == "send_sms":
return sms_decorator(element, self.to)
return element
def __init__(self, to, message, conf):
self.to = to
self.message = message
self.conf = conf
def send_sms(self) -> str:
raise NotImplementedError()
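# Minimal custom provider sketch (an assumption, mirroring the Twilio provider
# in the next file): a subclass only has to implement send_sms(); a truthy
# return value makes the __getattribute__ wrapper log an SMSMessage row.
#
#   class ConsoleProvider(SMSProvider):
#       def send_sms(self):
#           print(f"SMS to {self.to}: {self.message}")
#           return True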
```
#### File: providers/twilio/__init__.py
```python
from ..base import SMSProvider
class Twilio(SMSProvider):
def send_sms(self):
from twilio.rest import Client
client = Client(self.conf.SMS_AUTH_ACCOUNT_SID, self.conf.SMS_AUTH_AUTH_TOKEN)
message = client.messages.create(
to=f"{self.to}", from_=self.conf.SMS_PROVIDER_FROM, body=self.message
)
return message
```
#### File: django-rest-sms-auth/sms_auth/tasks.py
```python
import importlib
from .conf import conf
from .models import PhoneCode
celery_conf = importlib.import_module(conf.SMS_CELERY_FILE_NAME)
app = getattr(celery_conf, "app")
def get_provider_class():
provider = conf.SMS_PROVIDER
return provider
@app.task
def send_sms_async(identifier: int):
code_instance = PhoneCode.objects.filter(pk=identifier).first()
if code_instance:
provider_class = get_provider_class()
provider = provider_class(
to=code_instance.phone_number, message=code_instance.message, conf=conf
)
provider.send_sms()
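# This task is enqueued from listeners.phone_code_post_save via
# django.db.transaction.on_commit, so the PhoneCode row is committed
# before the worker looks it up by primary key.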
``` |