{
"source": "JobDoesburg/PUC-admin",
"score": 2
}
#### File: pucadmin/frontoffice/forms.py
```python
from django import forms
from django.conf import settings
from django.forms import models, inlineformset_factory
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from competitions.models import (
Submission,
Competition,
Student as CompetitionStudent,
Supervisor as CompetitionSupervisor,
)
from questions.models import Question, Student as QuestionStudent
from schools.models import School
class SubmissionForm(models.ModelForm):
class Meta:
model = Submission
fields = [
"competition",
"title",
"course",
"abstract",
"document",
"school_text",
]
privacy_policy = forms.BooleanField(required=True,)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["competition"].queryset = Competition.open_for_registration()
self.fields["competition"].initial = Competition.open_for_registration().first()
self.fields["competition"].required = True
self.fields["title"].required = True
self.fields["title"].help_text = _("The title of your research")
self.fields["course"].required = True
self.fields["course"].help_text = _(
"The course to which your research relates most"
)
self.fields["abstract"].required = True
self.fields["abstract"].help_text = _(
"Provide a brief summary of your research (50 to 300 words)"
)
self.fields["document"].required = True
self.fields["school_text"].required = True
self.fields["school_text"].label = _("School")
self.fields["privacy_policy"].label = mark_safe(
_(
'The Radboud Pre-University College of Science processes the above data for the purpose of participation in the contest. The personal data will not be stored after processing. I agree with the <a href="%s" target="_blank">privacy regulations of Radboud University</a> and with the processing of the data provided by me for the purposes described above.'
)
% settings.PRIVACY_STATEMENT_URL
)
def save(self, commit=True):
instance = super().save(commit=False)
schools = School.objects.filter(name=self.cleaned_data["school_text"].lower())
if schools.exists():
instance.school = schools.first()
if commit:
instance.save()
return instance
class CompetitionStudentForm(models.ModelForm):
class Meta:
model = CompetitionStudent
fields = [
"first_name",
"last_name",
"address_1",
"address_2",
"zip",
"town",
"phone",
"email",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["first_name"].required = True
self.fields["last_name"].required = True
self.fields["address_1"].required = True
self.fields["zip"].required = True
self.fields["town"].required = True
self.fields["email"].required = True
class CompetitionSupervisorForm(models.ModelForm):
class Meta:
model = CompetitionSupervisor
fields = ["first_name", "last_name", "phone", "email", "course"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["first_name"].required = True
self.fields["last_name"].required = True
self.fields["email"].required = True
self.fields["course"].required = True
CompetitionStudentFormset = inlineformset_factory(
parent_model=Submission,
model=CompetitionStudent,
form=CompetitionStudentForm,
extra=1,
can_delete=False,
max_num=3,
validate_max=True,
min_num=1,
validate_min=True,
)
CompetitionSupervisorFormSet = inlineformset_factory(
parent_model=Submission,
model=CompetitionSupervisor,
form=CompetitionSupervisorForm,
extra=0,
can_delete=False,
max_num=2,
validate_max=True,
min_num=1,
validate_min=True,
)
class QuestionSubmissionForm(models.ModelForm):
class Meta:
model = Question
fields = [
"school_text",
"course",
"research_question",
"sub_questions",
"message",
"expected_end_date",
]
widgets = {
"research_question": forms.Textarea(attrs={"rows": 2,}),
"sub_questions": forms.Textarea(attrs={"rows": 5,}),
"message": forms.Textarea(attrs={"rows": 8,}),
}
privacy_policy = forms.BooleanField(required=True,)
expected_end_date = forms.DateField(
label=_("Expected end date")
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["school_text"].required = True
self.fields["school_text"].label = _("School")
self.fields["course"].required = True
self.fields["course"].help_text = _(
"The course to which your research relates most"
)
self.fields["research_question"].required = True
self.fields["sub_questions"].required = True
self.fields["message"].required = True
self.fields["message"].help_text = _(
"Try to be as specific as possible. The more clearly the question is asked, the more specifically the answer can be formulated and the faster you will receive an answer. Also clearly state your subject and research plan in the question. We can help you with the following issues: Choosing a specific topic; Arranging a meeting with an expert; Borrowing material from Radboud University; Conducting an experiment at Radboud University; Collection of literature. Of course, other questions are also welcome, we can always give advice."
)
self.fields["privacy_policy"].label = mark_safe(
_(
'The Radboud Pre-University College of Science processes the above data for the purpose of answering the questions. The personal data will not be stored after processing. I agree with the <a href="%s" target="_blank">privacy regulations of Radboud University</a> and with the processing of the data provided by me for the purposes described above.'
)
% settings.PRIVACY_STATEMENT_URL
)
self.fields["expected_end_date"].required = True
self.fields["expected_end_date"].help_text = _(
"DD-MM-YYYY. When do you expect to be finished with your research."
)
self.fields["expected_end_date"].widget.input_type = "date"
def save(self, commit=True):
instance = super().save(commit=False)
schools = School.objects.filter(name=self.cleaned_data["school_text"].lower())
if schools.exists():
instance.school = schools.first()
if commit:
instance.save()
return instance
class QuestionStudentForm(models.ModelForm):
class Meta:
model = QuestionStudent
fields = ["first_name", "last_name", "email"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["first_name"].required = True
self.fields["last_name"].required = True
self.fields["email"].required = True
QuestionStudentFormset = inlineformset_factory(
parent_model=Question,
model=QuestionStudent,
form=QuestionStudentForm,
extra=1,
can_delete=False,
max_num=3,
validate_max=True,
min_num=1,
validate_min=True,
)
```
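For orientation, here is a minimal sketch of how these forms and inline formsets might be wired together in a view. Only `SubmissionForm`, `CompetitionStudentFormset` and `CompetitionSupervisorFormSet` come from the module above; the view function, template name and redirect target are hypothetical.
```python
# Hypothetical view sketch; only the form/formset classes are from forms.py above.
from django.shortcuts import redirect, render

from .forms import (
    CompetitionStudentFormset,
    CompetitionSupervisorFormSet,
    SubmissionForm,
)


def competition_submission(request):
    form = SubmissionForm(request.POST or None, request.FILES or None)
    student_formset = CompetitionStudentFormset(request.POST or None)
    supervisor_formset = CompetitionSupervisorFormSet(request.POST or None)
    if request.method == "POST" and form.is_valid():
        # Hold back the parent so the inline formsets can be validated first.
        submission = form.save(commit=False)
        student_formset = CompetitionStudentFormset(request.POST, instance=submission)
        supervisor_formset = CompetitionSupervisorFormSet(request.POST, instance=submission)
        if student_formset.is_valid() and supervisor_formset.is_valid():
            submission.save()
            student_formset.save()
            supervisor_formset.save()
            return redirect("/")  # hypothetical success URL
    return render(
        request,
        "frontoffice/competition.html",  # hypothetical template name
        {
            "form": form,
            "student_formset": student_formset,
            "supervisor_formset": supervisor_formset,
        },
    )
```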
#### File: pucadmin/frontoffice/tests.py
```python
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from django.test import Client, TestCase, RequestFactory
from frontoffice import views
from frontoffice.forms import (
QuestionStudentFormset,
QuestionSubmissionForm,
SubmissionForm,
CompetitionStudentFormset,
CompetitionSupervisorFormSet,
)
from organisations.models import Course, Organisation
from competitions.models import (
Submission,
Competition,
Student as CompetitionStudent,
Supervisor as CompetitionSupervisor,
)
from questions.models import Question, Student as QuestionStudent
def _instantiate_formset(formset_class, data, instance=None, initial=None):
prefix = formset_class().prefix
formset_data = {}
for i, form_data in enumerate(data):
for name, value in form_data.items():
if isinstance(value, list):
for j, inner in enumerate(value):
formset_data["{}-{}-{}_{}".format(prefix, i, name, j)] = inner
else:
formset_data["{}-{}-{}".format(prefix, i, name)] = value
formset_data["{}-TOTAL_FORMS".format(prefix)] = len(data)
formset_data["{}-INITIAL_FORMS".format(prefix)] = 0
if instance:
return formset_class(formset_data, instance=instance, initial=initial)
else:
return formset_class(formset_data, initial=initial)
class QuestionFrontOfficeTest(TestCase):
def setUp(self):
self.organisation = Organisation.objects.create(name="PUC of Science")
self.course = Course.objects.create(
name="natuurkunde", slug="nat", organisation=self.organisation
)
self.form_data = {
"school_text": "Test college Nijmegen",
"course": self.course,
"research_question": "Lorem ipsum dolor sit amet",
"sub_questions": "Test test test",
"message": "Test test test",
"expected_end_date": timezone.datetime(year=2022, month=1, day=1),
"privacy_policy": 1,
}
self.formset_data = [
{
"first_name": "Firstname1",
"last_name": "Lastname1",
"email": "<EMAIL>",
},
{
"first_name": "Firstname2",
"last_name": "Lastname2",
"email": "<EMAIL>",
},
]
self.user = get_user_model().objects.create_user(
username="test1", email="<EMAIL>"
)
self.rf = RequestFactory()
self.view = views.QuestionSubmissionView()
self.client = Client()
self.client.force_login(self.user)
def test_privacy_policy_checked(self):
with self.subTest("Form is valid"):
form = QuestionSubmissionForm(self.form_data)
self.assertTrue(form.is_valid(), msg=dict(form.errors))
with self.subTest("Form is not valid"):
self.form_data["privacy_policy"] = 0
form = QuestionSubmissionForm(self.form_data)
self.assertFalse(form.is_valid(), msg=dict(form.errors))
def test_formset(self):
formset = _instantiate_formset(QuestionStudentFormset, self.formset_data)
self.assertTrue(formset.is_valid())
def test_submit_form(self):
self.form_data["course"] = self.course.id
self.form_data["expected_end_date"] = "01-01-2022"
formset = _instantiate_formset(QuestionStudentFormset, self.formset_data)
data = {**self.form_data, **formset.data}
response = self.client.post("/frontoffice/question/", data=data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Question.objects.count(), 1)
self.assertEqual(QuestionStudent.objects.count(), 2)
question = Question.objects.first()
student1 = QuestionStudent.objects.first()
student2 = QuestionStudent.objects.last()
self.assertEqual(question.school_text, self.form_data["school_text"])
self.assertEqual(question.course, self.course)
self.assertEqual(
question.research_question, self.form_data["research_question"]
)
self.assertEqual(question.sub_questions, self.form_data["sub_questions"])
self.assertEqual(question.message, self.form_data["message"])
self.assertEqual(
question.expected_end_date,
timezone.datetime(year=2022, month=1, day=1).date(),
)
self.assertEqual(student1.first_name, self.formset_data[0]["first_name"])
self.assertEqual(student1.last_name, self.formset_data[0]["last_name"])
self.assertEqual(student1.email, self.formset_data[0]["email"])
self.assertEqual(student1.question, question)
self.assertEqual(student2.first_name, self.formset_data[1]["first_name"])
self.assertEqual(student2.last_name, self.formset_data[1]["last_name"])
self.assertEqual(student2.email, self.formset_data[1]["email"])
self.assertEqual(student2.question, question)
class CompetitionFrontOfficeTest(TestCase):
def setUp(self):
self.organisation = Organisation.objects.create(name="PUC of Science")
self.competition = Competition.objects.create(
name="<NAME> 2022",
organisation=self.organisation,
registration_start=timezone.now() - timezone.timedelta(days=1),
registration_end=timezone.now() + timezone.timedelta(days=1),
)
self.course = Course.objects.create(
name="natuurkunde", slug="nat", organisation=self.organisation
)
self.test_file = SimpleUploadedFile(
"test_document.pdf", b"\x00\x00\x00", content_type="application/pdf"
)
self.form_data = {
"title": "Test title",
"competition": self.competition,
"course": self.course,
"abstract": "Lorem ipsum dolor sit amet",
"school_text": "Test test",
"privacy_policy": 1,
}
self.student_formset_data = [
{
"first_name": "Firstname1",
"last_name": "Lastname1",
"address_1": "Address 11",
"address_2": "Address 12",
"zip": "1234 AB",
"town": "Nijmegen",
"phone": "76543210",
"email": "<EMAIL>",
},
{
"first_name": "Firstname2",
"last_name": "Lastname2",
"address_1": "Address 12",
"address_2": "Address 22",
"zip": "4321 AB",
"town": "Nijmegen",
"phone": "01234567",
"email": "<EMAIL>",
},
]
self.supervisor_formset_data = [
{
"first_name": "Firstname1",
"last_name": "Lastname1",
"phone": "76543210",
"email": "<EMAIL>",
"course": self.course,
},
]
self.user = get_user_model().objects.create_user(
username="test1", email="<EMAIL>"
)
self.rf = RequestFactory()
self.view = views.CompetitionSubmissionView()
self.client = Client()
self.client.force_login(self.user)
def test_privacy_policy_checked(self):
with self.subTest("Form is valid"):
form = SubmissionForm(self.form_data, {"document": self.test_file})
self.assertTrue(form.is_valid(), msg=dict(form.errors))
with self.subTest("Form is not valid"):
self.form_data["privacy_policy"] = 0
form = SubmissionForm(self.form_data, {"document": self.test_file})
self.assertFalse(form.is_valid(), msg=dict(form.errors))
def test_formset(self):
student_formset = _instantiate_formset(
CompetitionStudentFormset, self.student_formset_data
)
self.assertTrue(student_formset.is_valid())
supervisor_formset = _instantiate_formset(
CompetitionSupervisorFormSet, self.supervisor_formset_data
)
self.assertTrue(supervisor_formset.is_valid())
def test_submit_form(self):
self.form_data["course"] = self.course.id
self.form_data["competition"] = self.competition.id
self.supervisor_formset_data[0]["course"] = self.course.id
student_formset = _instantiate_formset(
CompetitionStudentFormset, self.student_formset_data
)
supervisor_formset = _instantiate_formset(
CompetitionSupervisorFormSet, self.supervisor_formset_data
)
data = {**self.form_data, **student_formset.data, **supervisor_formset.data}
response = self.client.post("/frontoffice/competition/", data, follow=True)
# Test does not work because uploading a file does not work properly in test cases
# self.assertEqual(response.status_code, 200)
# self.assertEqual(Submission.objects.count(), 1)
# self.assertEqual(CompetitionStudent.objects.count(), 2)
# self.assertEqual(CompetitionSupervisor.objects.count(), 1)
# submission = Submission.objects.first()
# student1 = CompetitionStudent.objects.first()
# student2 = CompetitionStudent.objects.last()
# supervisor = CompetitionSupervisor.objects.first()
#
# self.assertEqual(submission.competition, self.competition)
# self.assertEqual(submission.title, self.form_data["title"])
# self.assertEqual(submission.course, self.course)
# self.assertEqual(submission.abstract, self.form_data["abstract"])
# self.assertEqual(submission.school_text, self.form_data["school_text"])
# self.assertEqual(
# student1.first_name, self.student_formset_data[0]["first_name"]
# )
# self.assertEqual(student1.last_name, self.student_formset_data[0]["last_name"])
# self.assertEqual(student1.address_1, self.student_formset_data[0]["address_1"])
# self.assertEqual(student1.address_2, self.student_formset_data[0]["address_2"])
# self.assertEqual(student1.zip, self.student_formset_data[0]["zip"])
# self.assertEqual(student1.town, self.student_formset_data[0]["town"])
# self.assertEqual(student1.phone, self.student_formset_data[0]["phone"])
# self.assertEqual(student1.email, self.student_formset_data[0]["email"])
# self.assertEqual(student1.submission, submission)
# self.assertEqual(
# student2.first_name, self.student_formset_data[1]["first_name"]
# )
# self.assertEqual(student2.last_name, self.student_formset_data[1]["last_name"])
# self.assertEqual(student2.address_1, self.student_formset_data[1]["address_1"])
# self.assertEqual(student2.address_2, self.student_formset_data[1]["address_2"])
# self.assertEqual(student2.zip, self.student_formset_data[1]["zip"])
# self.assertEqual(student2.town, self.student_formset_data[1]["town"])
# self.assertEqual(student2.phone, self.student_formset_data[1]["phone"])
# self.assertEqual(student2.email, self.student_formset_data[1]["email"])
# self.assertEqual(student2.submission, submission)
# self.assertEqual(
# supervisor.first_name, self.supervisor_formset_data[0]["first_name"]
# )
# self.assertEqual(
# supervisor.last_name, self.supervisor_formset_data[0]["last_name"]
# )
# self.assertEqual(supervisor.phone, self.supervisor_formset_data[0]["phone"])
# self.assertEqual(supervisor.email, self.supervisor_formset_data[0]["email"])
# self.assertEqual(supervisor.course, self.course)
# self.assertEqual(supervisor.submission, submission)
```
#### File: pucadmin/organisations/admin.py
```python
from django.contrib import admin
from django.contrib.admin import register
from django.utils.translation import gettext_lazy as _
from .models import Organisation, Course, User
@register(Organisation)
class OrganisationAdmin(admin.ModelAdmin):
pass
@register(Course)
class CourseAdmin(admin.ModelAdmin):
radio_fields = {"organisation": admin.VERTICAL}
list_display = (
"name",
"slug",
"_num_schools",
)
def _num_schools(self, obj):
return obj.schools.count()
_num_schools.short_description = _("#schools")
@register(User)
class UserAdmin(admin.ModelAdmin):
fieldsets = (
(
"Personal",
{
"fields": (
"username",
"first_name",
"last_name",
"email",
"alternative_email",
)
},
),
(
"Administration",
{
"fields": (
"organisation",
"date_joined",
"last_login",
"is_staff",
"is_active",
"is_superuser",
"groups",
"user_permissions",
),
},
),
)
filter_horizontal = (
"groups",
"user_permissions",
)
list_display_links = ("username", "__str__")
list_display = (
"username",
"__str__",
"email",
"alternative_email",
"organisation",
"is_active",
"is_staff",
"last_login",
)
list_filter = (
"organisation",
"is_active",
"is_staff",
"last_login",
)
search_fields = (
"username",
"first_name",
"last_name",
)
```
#### File: pucadmin/questions/tests.py
```python
from django.contrib.auth import get_user_model
from django.core import mail
from django.test import TestCase
from organisations.models import Course, Organisation
from questions.models import Question, CourseAssignee
class QuestionModelTest(TestCase):
def setUp(self):
self.organisation = Organisation.objects.create(name="PUC of Science")
self.assignee1 = get_user_model().objects.create_user(
username="test1", email="<EMAIL>"
)
self.assignee2 = get_user_model().objects.create_user(
username="test2", email="<EMAIL>"
)
self.course = Course.objects.create(
name="natuurkunde", slug="nat", organisation=self.organisation
)
def test_set_first_assignee(self):
self.question = Question(course=self.course)
CourseAssignee.objects.create(assignee=self.assignee1, course=self.course)
self.question.save()
self.assertEqual(self.question.assignee, self.assignee1)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.assignee1.email])
def test_change_assignee(self):
self.question = Question(assignee=self.assignee1, course=self.course)
self.question.save()
self.question.assignee = self.assignee2
self.question.save()
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].to, [self.assignee1.email])
self.assertEqual(mail.outbox[1].to, [self.assignee2.email])
```
#### File: pucadmin/secondments/models.py
```python
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from organisations.models import Course
from schools.models import School
class TimePeriod(models.Model):
class Meta:
verbose_name = _("time period")
verbose_name_plural = _("time periods")
name = models.CharField(
verbose_name=_("name"), max_length=20, help_text=_("For example, 2019-2020")
)
start = models.DateField()
end = models.DateField()
def clean(self):
super().clean()
errors = {}
if self.start > self.end:
errors.update({"end": _("End date cannot be before start date.")})
if errors:
raise ValidationError(errors)
def __str__(self):
return self.name
class Daypart(models.Model):
class Meta:
verbose_name = _("daypart")
verbose_name_plural = _("dayparts")
name = models.CharField(verbose_name=_("name"), max_length=20)
def __str__(self):
return self.name
class StudyProgram(models.Model):
class Meta:
verbose_name = _("study program")
verbose_name_plural = _("study program")
name = models.CharField(verbose_name=_("name"), max_length=20)
def __str__(self):
return self.name
class Employee(models.Model):
class Meta:
verbose_name = _("employee")
verbose_name_plural = _("employees")
time_period = models.ForeignKey(
TimePeriod,
verbose_name=_("time period"),
on_delete=models.CASCADE,
related_query_name="employees",
related_name="employees",
)
first_name = models.CharField(verbose_name=_("first name"), max_length=20)
last_name = models.CharField(verbose_name=_("last name"), max_length=20)
phone = models.CharField( # django-doctor: disable=nullable-string-field
verbose_name=_("phone"), max_length=20, blank=True, null=True
)
email = models.EmailField(
verbose_name=_("email"), blank=True, null=True
) # django-doctor: disable=nullable-string-field
study_program = models.ForeignKey(
StudyProgram,
verbose_name=_("study program"),
on_delete=models.PROTECT,
related_query_name="employees",
related_name="employees",
)
study_year = models.PositiveSmallIntegerField(
verbose_name=_("study year"), null=True, blank=True
)
courses = models.ManyToManyField(Course, verbose_name=_("courses"),)
hours_available = models.PositiveSmallIntegerField(
verbose_name=_("hours available"), null=True, blank=True
)
dayparts = models.ManyToManyField(
Daypart,
verbose_name=_("dayparts"),
related_query_name="dayparts",
related_name="dayparts",
)
drivers_license = models.BooleanField(verbose_name=_("drivers license"))
contract = models.BooleanField(verbose_name=_("contract"))
def __str__(self):
return f"{self.first_name} {self.last_name} ({self.time_period})"
class SecondmentSchool(models.Model):
class Meta:
verbose_name = _("school")
verbose_name_plural = _("schools")
time_period = models.ForeignKey(
TimePeriod,
verbose_name=_("time period"),
on_delete=models.CASCADE,
related_query_name="secondment_schools",
related_name="secondment_schools",
)
school = models.ForeignKey(
School,
verbose_name=_("school"),
on_delete=models.PROTECT,
related_query_name="secondment_schools",
related_name="secondment_schools",
)
contact_person = models.CharField(
verbose_name=_("contact person"), max_length=100, blank=True, null=True
)
phone = models.CharField( # django-doctor: disable=nullable-string-field
verbose_name=_("phone"), max_length=20, blank=True, null=True
)
email = models.EmailField(
verbose_name=_("email"), blank=True, null=True
) # django-doctor: disable=nullable-string-field
drivers_license_required = models.BooleanField(
verbose_name=_("drivers license required")
)
remarks = models.TextField(blank=True, null=True)
def __str__(self):
return f"{str(self.school)} ({self.contact_person}) ({self.time_period})"
class Request(models.Model):
class Meta:
verbose_name = _("request")
verbose_name_plural = _("requests")
school = models.ForeignKey(
SecondmentSchool,
verbose_name=_("school"),
on_delete=models.PROTECT,
related_query_name="requests",
related_name="requests",
)
course = models.ForeignKey(
Course,
verbose_name=_("course"),
on_delete=models.PROTECT,
related_query_name="secondment_requests",
related_name="secondment_requests",
)
num_hours = models.PositiveSmallIntegerField(
verbose_name=_("num. hours"), null=False, blank=False
)
dayparts = models.ManyToManyField(
Daypart,
verbose_name=_("dayparts"),
related_name="requests",
related_query_name="requests",
)
employee = models.ForeignKey(
Employee,
verbose_name=_("employee"),
on_delete=models.PROTECT,
blank=True,
null=True,
related_query_name="secondments",
related_name="secondments",
)
remarks = models.TextField(verbose_name=_("remarks"), blank=True, null=True)
@property
def candidates_url(self):
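# Build an admin changelist URL for employees, pre-filtered to candidates matching this
# request's time period, course, dayparts and, if required, a driver's license.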
url = reverse("admin:secondments_employee_changelist")
url += "?"
url += f"time_period__id__exact={self.school.time_period.id}"
url += f"&courses__id__exact={self.course.id}"
dayparts = [str(x.id) for x in self.dayparts.all()]
url += f"&dayparts={','.join(dayparts)}"
if self.school.drivers_license_required:
url += "&drivers_license__exact=1"
return url
def __str__(self):
return _("Secondment request for %(course)s by %(school)s.") % {
"course": self.course,
"school": self.school,
}
```
#### File: pucadmin/sp/utils.py
```python
import datetime
from django.conf import settings
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import get_object_or_404
from django.utils.module_loading import import_string
from .models import IdP
IDP_SESSION_KEY = "_idpid"
NAMEID_SESSION_KEY = "_nameid"
def authenticate(request, idp, saml):
return auth.authenticate(request, idp=idp, saml=saml)
def login(request, user, idp, saml):
auth.login(request, user)
# Store the authenticating IdP and actual (not mapped) SAML nameid in the session.
set_session_idp(request, idp, saml.get_nameid())
if idp.respect_expiration:
if not settings.SESSION_SERIALIZER.endswith("PickleSerializer"):
raise ImproperlyConfigured(
"IdP-based session expiration is only supported with the "
"PickleSerializer SESSION_SERIALIZER."
)
try:
dt = datetime.datetime.fromtimestamp(
saml.get_session_expiration(), tz=datetime.timezone.utc
)
request.session.set_expiry(dt)
except TypeError:
pass
def logout(request, idp):
auth.logout(request)
clear_session_idp(request)
def get_request_idp(request, **kwargs):
custom_loader = getattr(settings, "SP_IDP_LOADER", None)
if custom_loader:
return import_string(custom_loader)(request, **kwargs)
else:
return get_object_or_404(IdP, url_params=kwargs, is_active=True)
def get_session_idp(request):
return IdP.objects.filter(pk=request.session.get(IDP_SESSION_KEY)).first()
def get_session_nameid(request):
return request.session.get(NAMEID_SESSION_KEY)
def set_session_idp(request, idp, nameid):
request.session[IDP_SESSION_KEY] = idp.pk
request.session[NAMEID_SESSION_KEY] = nameid
def clear_session_idp(request):
for key in (IDP_SESSION_KEY, NAMEID_SESSION_KEY):
try:
del request.session[key]
except KeyError:
pass
```
#### File: pucadmin/sp/views.py
```python
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core import signing
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.settings import OneLogin_Saml2_Settings
from .utils import get_request_idp, get_session_nameid
def metadata(request, **kwargs):
idp = get_request_idp(request, **kwargs)
saml_settings = OneLogin_Saml2_Settings(
settings=idp.sp_settings, sp_validation_only=True
)
return HttpResponse(saml_settings.get_sp_metadata(), content_type="text/xml")
@csrf_exempt
@require_POST
def acs(request, **kwargs):
idp = get_request_idp(request, **kwargs)
if request.POST.get("RelayState"):
try:
# Login with state relayed from our application.
state = signing.loads(request.POST["RelayState"], max_age=idp.state_timeout)
except (signing.BadSignature, signing.SignatureExpired) as ex:
return render(
request,
"sp/error.html",
{
"idp": idp,
"state": None,
"errors": [str(ex)],
"reason": "Invalid SSO request signature.",
},
status=500,
)
else:
# IdP-initiated login.
state = {"test": False, "verify": False, "redir": ""}
saml = OneLogin_Saml2_Auth(idp.prepare_request(request), old_settings=idp.settings)
saml.process_response()
errors = saml.get_errors()
if errors:
return render(
request,
"sp/error.html",
{
"idp": idp,
"state": state,
"errors": errors,
"reason": saml.get_last_error_reason(),
},
status=500,
)
else:
if state.get("test", False):
attrs = []
for saml_attr, value in saml.get_attributes().items():
attr, created = idp.attributes.get_or_create(saml_attribute=saml_attr)
attrs.append((attr, "; ".join(value)))
return render(
request,
"sp/test.html",
{
"idp": idp,
"attrs": attrs,
"nameid": saml.get_nameid(),
"nameid_format": saml.get_nameid_format(),
},
)
elif state.get("verify", False):
user = idp.authenticate(request, saml)
if user == request.user:
return redirect(idp.get_login_redirect(state.get("redir")))
else:
return render(
request,
"sp/unauth.html",
{"nameid": idp.get_nameid(saml), "idp": idp, "verify": True},
status=401,
)
else:
user = idp.authenticate(request, saml)
if user:
if isinstance(user, HttpResponseBase):
return user
else:
idp.login(request, user, saml)
idp.last_login = timezone.now()
idp.save(update_fields=("last_login",))
return redirect(idp.get_login_redirect(state.get("redir")))
else:
return render(
request,
"sp/unauth.html",
{"nameid": idp.get_nameid(saml), "idp": idp, "verify": False},
status=401,
)
def slo(request, **kwargs):
idp = get_request_idp(request, **kwargs)
saml = OneLogin_Saml2_Auth(idp.prepare_request(request), old_settings=idp.settings)
state = request.GET.get("RelayState")
redir = saml.process_slo()
errors = saml.get_errors()
if errors:
return render(
request,
"sp/error.html",
{
"idp": idp,
"state": state,
"errors": errors,
"reason": saml.get_last_error_reason(),
},
status=500,
)
else:
idp.logout(request)
if not redir:
redir = idp.get_logout_redirect(state)
return redirect(redir)
def login(request, test=False, verify=False, **kwargs):
idp = get_request_idp(request, **kwargs)
saml = OneLogin_Saml2_Auth(idp.prepare_request(request), old_settings=idp.settings)
reauth = verify or "reauth" in request.GET
state = signing.dumps(
{
"test": test,
"verify": verify,
"redir": request.GET.get(REDIRECT_FIELD_NAME, ""),
}
)
# When verifying, we want to pass the (unmapped) SAML nameid, stored in the session.
# TODO: do we actually want UPN here, or some other specified mapped field? At least
# Auth0 is pre-populating the email field with nameid, which is not what we want.
nameid = get_session_nameid(request) if verify else None
return redirect(saml.login(state, force_authn=reauth, name_id_value_req=nameid))
def logout(request, **kwargs):
idp = get_request_idp(request, **kwargs)
redir = idp.get_logout_redirect(request.GET.get(REDIRECT_FIELD_NAME))
saml = OneLogin_Saml2_Auth(idp.prepare_request(request), old_settings=idp.settings)
if saml.get_slo_url() and idp.logout_triggers_slo:
# If the IdP supports SLO, send it a logout request (it will call our SLO).
return redirect(saml.logout(redir))
else:
# Handle the logout "locally", i.e. log out via django.contrib.auth by default.
idp.logout(request)
return redirect(redir)
```
{
"source": "jobe3774/raspend",
"score": 3
}
#### File: raspend/utils/workerthreads.py
```python
import threading
from abc import ABC, abstractmethod
from enum import Enum
from datetime import datetime, date, time, timedelta
class ThreadHandlerBase(ABC):
""" This abstract class describes the basic structure of a raspend thread handler.
Derive this class and implement the 'prepare' and 'invoke' methods.
'prepare' is called prior to running the thread's loop and 'invoke' is called
for every loop iteration. Every thread and therefore every handler instance
uses the same shared dictionary to read and write its data.
"""
def __init__(self):
return super().__init__()
def setSharedDict(self, sharedDict):
""" Sets the shared dictionary.
"""
self.sharedDict = sharedDict
def setShutdownFlag(self, shutdownFlag):
""" Supplies the handler with the application's shutdown flag.
Use this shutdown flag for aborting loops within 'invoke'.
For example if you are reading a serial port in a loop, you should always check this flag.
"""
self.shutdownFlag = shutdownFlag
def aborted(self):
""" Convenience function for checking the shutdown flag.
"""
return self.shutdownFlag.is_set()
@abstractmethod
def prepare(self):
""" This method is called prior to running the thread's loop.
"""
pass
@abstractmethod
def invoke(self):
""" This method is called for every loop iteration.
"""
pass
class WorkerThreadBase(ABC, threading.Thread):
""" The base class for every worker thread.
"""
def __init__(self, shutdownEvent, accessLock, threadHandler):
""" Parameters:
shutdownEvent - an 'Event' object for gracefully shutting down this thread.
accessLock - a 'Lock' object for synchronizing access to the thread handler.
threadHandler - an instance of a class deriving 'ThreadHandlerBase'.
"""
threading.Thread.__init__(self)
self.shutdownEvent = shutdownEvent
self.accessLock = accessLock
if not isinstance(threadHandler, ThreadHandlerBase):
raise TypeError("'threadHandler' must be a derivative of 'ThreadHandlerBase'.")
self.threadHandler = threadHandler
return
@abstractmethod
def run(self):
pass
class WorkerThread(WorkerThreadBase):
""" A worker thread. It sleeps for 'waitTimeout' seconds before doing the next iteration.
The run loop runs until 'shutdownEvent' has been signaled.
"""
def __init__(self, shutdownEvent, accessLock, threadHandler, waitTimeout):
super().__init__(shutdownEvent, accessLock, threadHandler)
self.waitTimeout = waitTimeout
return
def run(self):
self.accessLock.acquire()
self.threadHandler.prepare()
self.accessLock.release()
while not self.shutdownEvent.is_set():
self.accessLock.acquire()
self.threadHandler.invoke()
self.accessLock.release()
self.shutdownEvent.wait(self.waitTimeout)
return
class ScheduleRepetitionType(Enum):
""" Constants describing the repetition rate of a scheduled worker thread.
"""
WEEKLY = 1
DAILY = 2
HOURLY = 3
MINUTELY = 4
SECOND = 5
class ScheduledWorkerThread(WorkerThreadBase):
""" A worker thread using a schedule date and time for doing an iteration.
'repetitionType' and 'repetitionFactor' describe how frequently iterations take place.
"""
def __init__(self, shutdownEvent, accessLock, threadHandler, scheduledTime=None, scheduledDate=None, repetitionType=None, repetitionFactor=1):
super().__init__(shutdownEvent, accessLock, threadHandler)
if scheduledTime is None:
scheduledTime = datetime.now().time()
if scheduledDate is None:
scheduledDate = datetime.now().date()
self.scheduledStart = datetime.combine(scheduledDate, scheduledTime)
if repetitionType and not isinstance(repetitionType, ScheduleRepetitionType):
raise TypeError("'repetionType' must be of type 'ScheduleRepetitionType' or None.")
elif repetitionType is None:
repetitionType = ScheduleRepetitionType.DAILY
if repetitionFactor < 1:
raise ValueError("'repetitionFactor' must be 1 or greater.")
self.repetitionType = repetitionType
self.repetitionFactor = repetitionFactor
return
def getTimedeltaFactors(self):
weeks = days = hours = minutes = seconds = 0
if self.repetitionType == ScheduleRepetitionType.WEEKLY:
weeks = self.repetitionFactor
if self.repetitionType == ScheduleRepetitionType.DAILY:
days = self.repetitionFactor
if self.repetitionType == ScheduleRepetitionType.HOURLY:
hours = self.repetitionFactor
if self.repetitionType == ScheduleRepetitionType.MINUTELY:
minutes = self.repetitionFactor
if self.repetitionType == ScheduleRepetitionType.SECOND:
seconds = self.repetitionFactor
return weeks, days, hours, minutes, seconds
def run(self):
self.accessLock.acquire()
self.threadHandler.prepare()
self.accessLock.release()
weeks, days, hours, minutes, seconds = self.getTimedeltaFactors()
tNow = datetime.now()
t0 = self.scheduledStart
timeout = (t0 - tNow).total_seconds()
# If timeout is negative, then we already passed start time.
# In that case we calculate the timeout for the coming iteration.
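# Example: with a 10:00 scheduled start, MINUTELY repetition, factor 5 and a current time
# of 10:07, the loop below advances t0 via 10:05 to 10:10, leaving a positive timeout of
# roughly three minutes until the first iteration.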
while timeout < 0.0:
t1 = t0 + timedelta(days = days, weeks = weeks, hours = hours, minutes = minutes, seconds = seconds)
timeout = (t1 - datetime.now()).total_seconds()
t0 = t1
while not self.shutdownEvent.wait(timeout):
self.accessLock.acquire()
self.threadHandler.invoke()
self.accessLock.release()
t1 = t0 + timedelta(days = days, weeks = weeks, hours = hours, minutes = minutes, seconds = seconds)
timeout = (t1 - datetime.now()).total_seconds()
t0 = t1
return
```
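As a rough usage sketch (not taken from the repository): derive `ThreadHandlerBase`, give the handler the shared dictionary and shutdown flag, and run it via a `WorkerThread`. The `ClockHandler` class and the `"ticks"` key below are made up; the constructor signatures follow the classes above.
```python
# Minimal usage sketch; ClockHandler and the "ticks" key are hypothetical.
import threading
import time

from raspend.utils.workerthreads import ThreadHandlerBase, WorkerThread


class ClockHandler(ThreadHandlerBase):
    def prepare(self):
        self.sharedDict["ticks"] = 0

    def invoke(self):
        self.sharedDict["ticks"] += 1


shutdownEvent = threading.Event()
accessLock = threading.Lock()
sharedDict = {}

handler = ClockHandler()
handler.setSharedDict(sharedDict)
handler.setShutdownFlag(shutdownEvent)

worker = WorkerThread(shutdownEvent, accessLock, handler, waitTimeout=1)
worker.start()

time.sleep(3)          # let the handler run a few iterations
shutdownEvent.set()    # signal a graceful shutdown
worker.join()
print(sharedDict["ticks"])
```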
{
"source": "jobe3774/smartmeter",
"score": 3
}
#### File: jobe3774/smartmeter/smartmeter.py
```python
import RPi.GPIO as GPIO
import logging
import json
import os
import argparse
import serial
import re
from tzlocal import get_localzone
from datetime import datetime, timedelta, time, timezone
from raspend import RaspendApplication, ThreadHandlerBase
from collections import namedtuple
class SmartMeterKeys:
""" OBIS codes of the EBZ DD3.
"""
POWER_IMPORT = "1.8.0"
POWER_EXPORT = "2.8.0"
CURRENT_POWER_SUM = "16.7.0"
CURRENT_POWER_L1 = "36.7.0"
CURRENT_POWER_L2 = "56.7.0"
CURRENT_POWER_L3 = "76.7.0"
class SmartMeterConstants:
""" Some constants used for identifying begin and end of a datagram.
"""
DATAGRAM_INITIATOR = '/'
DATAGRAM_TERMINATOR = '!'
class ReadSmartMeter(ThreadHandlerBase):
""" This class reads the datagrams of the EBZ DD3 from the USB device attached to the 'Info-DSS' of the smart meter.
"""
def __init__(self, sectionName, serialPort, localTimeZone):
self.sectionName = sectionName
self.serialPort = serialPort
self.localTimeZone = localTimeZone
self.datagramBuffer = list()
self.OBISCodeMap = dict()
self.OBISCodeMap[SmartMeterKeys.POWER_IMPORT] = "POWER_IMPORT"
self.OBISCodeMap[SmartMeterKeys.POWER_EXPORT] = "POWER_EXPORT"
self.OBISCodeMap[SmartMeterKeys.CURRENT_POWER_SUM] = "CURRENT_POWER_SUM"
self.OBISCodeMap[SmartMeterKeys.CURRENT_POWER_L1] = "CURRENT_POWER_L1"
self.OBISCodeMap[SmartMeterKeys.CURRENT_POWER_L2] = "CURRENT_POWER_L2"
self.OBISCodeMap[SmartMeterKeys.CURRENT_POWER_L3] = "CURRENT_POWER_L3"
return
def extractSmartMeterValues(self, datagram):
""" This method extracts only the relevant parts of the datagram and writes them into the shared dictionary.
"""
regex = r"1-0:(\d+.[8|7].0)\*255\((-?\d+.\d+)\*(\w+)\)"
matches = re.finditer(regex, datagram)
thisDict = self.sharedDict[self.sectionName]
thisDict["timestampUTC"] = datetime.now(timezone.utc).isoformat()
for match in matches:
strOBISCode = match.group(1)
if strOBISCode in self.OBISCodeMap:
thisDict[self.OBISCodeMap[strOBISCode]] = {"OBIS_Code": strOBISCode, "value": round(float(match.group(2)), 3), "unit" : match.group(3)}
return
def prepare(self):
""" Open the connected USB device for reading.
"""
if not self.sectionName in self.sharedDict:
self.sharedDict[self.sectionName] = dict()
self.serial = serial.Serial(self.serialPort,
baudrate = 9600,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.SEVENBITS,
timeout=1)
return
def invoke(self):
""" Reads one datagram per invocation.
Since the smart meter pushes one datagram every second, this should be the minimal timeout for this method.
Currently this method is invoked every 5 seconds.
"""
readDatagram = True
beginDatagram = False
endDatagram = False
self.datagramBuffer.clear()
while not self.aborted() and readDatagram:
c = self.serial.read().decode("utf-8")
if c == SmartMeterConstants.DATAGRAM_INITIATOR:
beginDatagram = True
endDatagram = False
if c == SmartMeterConstants.DATAGRAM_TERMINATOR and beginDatagram:
beginDatagram = False
endDatagram = True
if beginDatagram and not endDatagram:
self.datagramBuffer.append(c)
if endDatagram and not beginDatagram:
self.datagramBuffer.append(c)
self.extractSmartMeterValues(''.join(self.datagramBuffer))
readDatagram = beginDatagram = endDatagram = False
return
class S0InterfaceReader():
""" This class counts the pulses of the Finder smart meter.
On every rising edge detected, the GPIO interface invokes the ISR method below.
"""
def __init__(self, sectionName, sharedDict, accessLock):
self.sectionName = sectionName
self.sharedDict = sharedDict
if sectionName not in self.sharedDict:
self.sharedDict[sectionName] = {"count" : 0.0, "timestampUTC": datetime.now(timezone.utc).isoformat()}
self.accessLock = accessLock
def setValue(self, value):
""" This method is used to set the initial counter value of the smart meter.
"""
success = False
self.accessLock.acquire()
try:
thisDict = self.sharedDict[self.sectionName]
thisDict["count"] = float(value)
thisDict["timestampUTC"] = datetime.now(timezone.utc).isoformat()
success = True
except Exception as e:
print(e)
finally:
self.accessLock.release()
return success
def ISR(self, channel):
""" This is the interrupt service routine invoked by the GPIO interface when a rising edge has been detected.
"""
self.accessLock.acquire()
try:
thisDict = self.sharedDict[self.sectionName]
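# One detected pulse adds 0.001 to the counter, i.e. the S0 output is assumed to deliver 1000 pulses per counted kWh.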
thisDict["count"] = thisDict["count"] + 0.001
thisDict["timestampUTC"] = datetime.now(timezone.utc).isoformat()
except Exception as e:
print (e)
finally:
self.accessLock.release()
def main():
localTimeZone = get_localzone()
logging.basicConfig(filename='smartmeter.log', level=logging.INFO)
logging.info("Starting at {} (PID={})".format(datetime.now(localTimeZone), os.getpid()))
# Check commandline arguments.
cmdLineParser = argparse.ArgumentParser(prog="smartmeter", usage="%(prog)s [options]")
cmdLineParser.add_argument("--port", help="The port number the server should listen on", type=int, required=True)
cmdLineParser.add_argument("--serialPort", help="The serial port to read from", type=str, required=True)
cmdLineParser.add_argument("--s0Pin", help="The BCM number of the pin connected to the S0 interface", type=int, required=False)
try:
args = cmdLineParser.parse_args()
except SystemExit:
return
try:
myApp = RaspendApplication(args.port)
myApp.createWorkerThread(ReadSmartMeter("smartmeter_d0", args.serialPort, localTimeZone), 5)
s0Interface = S0InterfaceReader("smartmeter_s0", myApp.getSharedDict(), myApp.getAccessLock())
if args.s0Pin is not None:
# Making this method available as a command enables us to set the initial value via HTTP GET.
# http://<IP-OF-YOUR-RPI>:<PORT>/cmd?name=s0Interface.setValue&value=<COUNT>
myApp.addCommand(s0Interface.setValue)
# Setup the GPIO pin for detecting rising edges.
GPIO.setmode(GPIO.BCM)
GPIO.setup(args.s0Pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.add_event_detect(args.s0Pin, GPIO.RISING, callback = s0Interface.ISR, bouncetime = 200)
myApp.run()
logging.info("Stopped at {} (PID={})".format(datetime.now(localTimeZone), os.getpid()))
except Exception as e:
logging.exception("Unexpected error occurred!", exc_info = True)
finally:
if args.s0Pin is not None:
GPIO.cleanup()
if __name__ == "__main__":
main()
```
{
"source": "jobechoi/bazel",
"score": 2
}
#### File: testdata/config_apis_test/input.bzl
```python
def exercise_the_api():
_var6 = configuration_field("foo", "bar")
exercise_the_api()
def transition_func(settings):
"""A no-op transition function."""
return settings
my_transition = transition(implementation = transition_func, inputs = [], outputs = [])
def _build_setting_impl(ctx):
return []
string_flag = rule(
implementation = _build_setting_impl,
build_setting = config.string(flag = True),
)
int_setting = rule(
implementation = _build_setting_impl,
build_setting = config.int(flag = False),
)
```
#### File: testdata/proto_format_test/input.bzl
```python
def check_function(foo):
"""Runs some checks on the given function parameter.
This rule runs checks on a given function parameter.
Use `bazel build` to run the check.
Args:
foo: A unique name for this rule.
"""
pass
example = provider(
doc = "Stores information about an example.",
fields = {
"foo": "A string representing foo",
"bar": "A string representing bar",
"baz": "A string representing baz",
},
)
def _rule_impl(ctx):
print("Hello World")
my_example = rule(
implementation = _rule_impl,
doc = "Small example of rule.",
attrs = {
"useless": attr.string(
doc = "This argument will be ignored.",
default = "ignoreme",
),
},
)
```
#### File: py/bazel/bazel_workspace_test.py
```python
import os
import unittest
from src.test.py.bazel import test_base
class BazelWorkspaceTest(test_base.TestBase):
def testWorkspaceDotBazelFileInMainRepo(self):
workspace_dot_bazel = self.ScratchFile("WORKSPACE.bazel")
self.ScratchFile("BUILD", [
"py_binary(",
" name = 'bin',",
" srcs = ['bin.py'],",
")",
])
self.ScratchFile("bin.py")
exit_code, _, stderr = self.RunBazel(["build", "//:bin"])
self.AssertExitCode(exit_code, 0, stderr)
# If WORKSPACE.bazel is deleted and no WORKSPACE exists,
# the build should fail.
os.remove(workspace_dot_bazel)
exit_code, _, stderr = self.RunBazel(["build", "//:bin"])
self.AssertExitCode(exit_code, 2, stderr)
def testWorkspaceDotBazelFileWithExternalRepo(self):
self.ScratchDir("A")
self.ScratchFile("A/WORKSPACE.bazel")
self.ScratchFile("A/BUILD", [
"py_library(",
" name = 'lib',",
" srcs = ['lib.py'],",
" visibility = ['//visibility:public'],",
")",
])
self.ScratchFile("A/lib.py")
work_dir = self.ScratchDir("B")
# Test WORKSPACE.bazel takes priority over WORKSPACE
self.ScratchFile("B/WORKSPACE")
workspace_dot_bazel = self.ScratchFile(
"B/WORKSPACE.bazel", ["local_repository(name = 'A', path='../A')"])
self.ScratchFile("B/bin.py")
self.ScratchFile("B/BUILD", [
"py_binary(",
" name = 'bin',",
" srcs = ['bin.py'],",
" deps = ['@A//:lib'],",
")",
])
exit_code, _, stderr = self.RunBazel(args=["build", ":bin"], cwd=work_dir)
self.AssertExitCode(exit_code, 0, stderr)
# Test WORKSPACE takes effect after deleting WORKSPACE.bazel
os.remove(workspace_dot_bazel)
exit_code, _, stderr = self.RunBazel(args=["build", ":bin"], cwd=work_dir)
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn("no such package '@A//'", "".join(stderr))
# Test a WORKSPACE.bazel directory won't confuse Bazel
self.ScratchFile("B/WORKSPACE",
["local_repository(name = 'A', path='../A')"])
self.ScratchDir("B/WORKSPACE.bazel")
exit_code, _, stderr = self.RunBazel(args=["build", ":bin"], cwd=work_dir)
self.AssertExitCode(exit_code, 0, stderr)
if __name__ == "__main__":
unittest.main()
```
#### File: py/bazel/query_test.py
```python
import unittest
from src.test.py.bazel import test_base
class QueryTest(test_base.TestBase):
def testSimpleQuery(self):
self.ScratchFile('WORKSPACE')
self.ScratchFile('foo/BUILD', [
'exports_files(["exported.txt"])',
'filegroup(name = "top-rule", srcs = [":dep-rule"])',
'filegroup(name = "dep-rule", srcs = ["src.txt"])',
])
self.ScratchFile('foo/src.txt')
self.ScratchFile('foo/exported.txt')
self.ScratchFile('foo/non-exported.txt')
self._AssertQueryOutput('//foo:top-rule', '//foo:top-rule')
self._AssertQueryOutput('//foo:*', '//foo:top-rule', '//foo:dep-rule',
'//foo:src.txt', '//foo:exported.txt',
'//foo:BUILD')
self._AssertQueryOutput('deps(//foo:top-rule)', '//foo:top-rule',
'//foo:dep-rule', '//foo:src.txt')
self._AssertQueryOutput('deps(//foo:top-rule, 1)', '//foo:top-rule',
'//foo:dep-rule')
def _AssertQueryOutput(self, query_expr, *expected_results):
exit_code, stdout, stderr = self.RunBazel(['query', query_expr])
self.AssertExitCode(exit_code, 0, stderr)
stdout = sorted(x for x in stdout if x)
self.assertEqual(len(stdout), len(expected_results))
self.assertListEqual(stdout, sorted(expected_results))
if __name__ == '__main__':
unittest.main()
```
#### File: absl/testing/_bazelize_command.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def get_executable_path(py_binary_path):
"""Returns the executable path of a py_binary.
This returns the executable path of a py_binary that is in another Bazel
target's data dependencies.
On Linux/macOS, it's the same as the py_binary_path.
On Windows, the py_binary_path points to a zip file, and Bazel 0.5.3+
generates a .cmd file that can be used to execute the py_binary.
Args:
py_binary_path: string, the path of a py_binary that is in another Bazel
target's data dependencies.
"""
if os.name == 'nt':
executable_path = py_binary_path + '.cmd'
if executable_path.startswith('\\\\?\\'):
# In Bazel 0.5.3 and Python 3, the paths starts with "\\?\".
# However, Python subprocess doesn't support those paths well.
# Strip them as we don't need the prefix.
# See this page for more information about "\\?\":
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.
executable_path = executable_path[4:]
return executable_path
else:
return py_binary_path
```
#### File: tools/android/instrumentation_test_check_test.py
```python
import unittest
from tools.android.instrumentation_test_check import _ExtractTargetPackageName
from tools.android.instrumentation_test_check import _ExtractTargetPackageToInstrument
from tools.android.instrumentation_test_check import _ValidateManifestPackageNames
from tools.android.instrumentation_test_check import ManifestError
INSTRUMENTATION_MANIFEST = """<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.example.test" >
<instrumentation android:targetPackage="com.example"
android:name="android.support.test.runner.AndroidJUnitRunner"/>
<application android:label="Test"/>
</manifest>
"""
INCORRECT_INSTRUMENTATION_MANIFEST = """<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.example.test" >
<instrumentation android:targetPackage="not.com.example"
android:name="android.support.test.runner.AndroidJUnitRunner"/>
<application android:label="Test"/>
</manifest>
"""
TARGET_MANIFEST = """<?xml version="2.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.example" >
<application android:label="App" />
</manifest>
"""
class InstrumentationTestCheckTest(unittest.TestCase):
def test_extract_instrumentation_target_package(self):
self.assertEqual(
_ExtractTargetPackageToInstrument(INSTRUMENTATION_MANIFEST, ""),
"com.example")
def test_extract_target_package(self):
self.assertEqual(
_ExtractTargetPackageName(TARGET_MANIFEST, "unused"), "com.example")
def test_target_package_check(self):
self.assertEqual(
_ValidateManifestPackageNames(INSTRUMENTATION_MANIFEST, "unused",
TARGET_MANIFEST, "unused"),
("com.example", "com.example"))
def test_target_package_check_failure(self):
with self.assertRaises(ManifestError):
_ValidateManifestPackageNames(INCORRECT_INSTRUMENTATION_MANIFEST,
"unused", TARGET_MANIFEST, "unused")
if __name__ == "__main__":
unittest.main()
```
#### File: aquery_differ/resolvers/dep_set_resolver.py
```python
import copy
class DepSetResolver(object):
"""Utility class to resolve the dependency nested set."""
def __init__(self, dep_set_of_files, artifact_id_to_path):
self.dep_set_to_artifact_ids = {}
self.id_to_dep_set = {dep_set.id: dep_set for dep_set in dep_set_of_files}
self.artifact_id_to_path = artifact_id_to_path
def resolve(self, dep_set):
"""Given a dep set, return the flattened list of input artifact ids.
Args:
dep_set: the dep set object to be resolved.
Returns:
The flattened list of input artifact ids.
"""
if dep_set.id in self.dep_set_to_artifact_ids:
return self.dep_set_to_artifact_ids[dep_set.id]
artifact_ids = copy.copy([
self.artifact_id_to_path[artifact_id]
for artifact_id in dep_set.direct_artifact_ids
])
for transitive_dep_set_id in dep_set.transitive_dep_set_ids:
artifact_ids.extend(
self.resolve(self.id_to_dep_set[transitive_dep_set_id]))
self.dep_set_to_artifact_ids[dep_set.id] = artifact_ids
return self.dep_set_to_artifact_ids[dep_set.id]
```
#### File: ctexplain/analyses/summary.py
```python
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from dataclasses import dataclass
from tools.ctexplain.types import ConfiguredTarget
# Do not edit this line. Copybara replaces it with PY2 migration helper.
import tools.ctexplain.util as util
@dataclass(frozen=True)
class _Summary():
"""Analysis result."""
# Number of configurations in the build's configured target graph.
configurations: int
# Number of unique target labels.
targets: int
# Number of configured targets.
configured_targets: int
# Number of targets that produce multiple configured targets. This is more
# subtle than computing configured_targets - targets. For example, if
# targets=2 and configured_targets=4, that could mean both targets are
# configured twice. Or it could mean the first target is configured 3 times.
repeated_targets: int
def analyze(cts: Tuple[ConfiguredTarget, ...]) -> _Summary:
"""Runs the analysis on a build's configured targets."""
configurations = set()
targets = set()
label_count = {}
for ct in cts:
configurations.add(ct.config_hash)
targets.add(ct.label)
label_count[ct.label] = label_count.setdefault(ct.label, 0) + 1
configured_targets = len(cts)
repeated_targets = sum([1 for count in label_count.values() if count > 1])
return _Summary(
len(configurations), len(targets), configured_targets, repeated_targets)
def report(result: _Summary) -> None:
"""Reports analysis results to the user.
We intentionally make this its own function to make it easy to support other
output formats (like machine-readable) if we ever want to do that.
Args:
result: the analysis result
"""
ct_surplus = util.percent_diff(result.targets, result.configured_targets)
print(f"""
Configurations: {result.configurations}
Targets: {result.targets}
Configured targets: {result.configured_targets} ({ct_surplus} vs. targets)
Targets with multiple configs: {result.repeated_targets}
""")
```
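A hypothetical driver for this analysis, for illustration only: the `ConfiguredTarget` keyword arguments mirror how `bazel_api.py` (below) constructs them, and the module import path is assumed from the file layout.
```python
# Hypothetical usage sketch; labels and config hashes are made up.
from tools.ctexplain.analyses import summary
from tools.ctexplain.types import ConfiguredTarget

cts = (
    ConfiguredTarget(label="//app:bin", config=None, config_hash="1a2b3c",
                     transitive_fragments=("PlatformConfiguration",)),
    ConfiguredTarget(label="//app:bin", config=None, config_hash="4d5e6f",
                     transitive_fragments=("PlatformConfiguration",)),
    ConfiguredTarget(label="//app:lib", config=None, config_hash="1a2b3c",
                     transitive_fragments=("PlatformConfiguration",)),
)

# 2 configurations, 2 unique targets, 3 configured targets, 1 repeated target.
summary.report(summary.analyze(cts))
```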
#### File: tools/ctexplain/bazel_api.py
```python
import json
import os
import subprocess
from typing import Callable
from typing import List
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from frozendict import frozendict
from tools.ctexplain.types import Configuration
from tools.ctexplain.types import ConfiguredTarget
from tools.ctexplain.types import HostConfiguration
from tools.ctexplain.types import NullConfiguration
def run_bazel_in_client(args: List[str]) -> Tuple[int, List[str], List[str]]:
"""Calls bazel within the current workspace.
For production use. Tests use an alternative invoker that goes through test
infrastructure.
Args:
args: the arguments to call Bazel with
Returns:
Tuple of (return code, stdout, stderr)
"""
result = subprocess.run(
["blaze"] + args,
cwd=os.getcwd(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)
return (result.returncode, result.stdout.decode("utf-8").split(os.linesep),
result.stderr)
class BazelApi():
"""API that accepts injectable Bazel invocation logic."""
def __init__(self,
run_bazel: Callable[[List[str]],
Tuple[int, List[str],
List[str]]] = run_bazel_in_client):
self.run_bazel = run_bazel
def cquery(self,
args: List[str]) -> Tuple[bool, str, Tuple[ConfiguredTarget, ...]]:
"""Calls cquery with the given arguments.
Args:
args: A list of cquery command-line arguments, one argument per entry.
Returns:
(success, stderr, cts), where success is True iff the query succeeded,
stderr contains the query's stderr (regardless of success value), and cts
is the configured targets found by the query if successful, empty
otherwise.
ct order preserves cquery's output order. This is topologically sorted
with duplicates removed. So no unique configured target appears twice and
if A depends on B, A appears before B.
"""
base_args = ["cquery", "--show_config_fragments=transitive"]
(returncode, stdout, stderr) = self.run_bazel(base_args + args)
if returncode != 0:
return (False, stderr, ())
cts = []
for line in stdout:
if not line.strip():
continue
ctinfo = _parse_cquery_result_line(line)
if ctinfo is not None:
cts.append(ctinfo)
return (True, stderr, tuple(cts))
def get_config(self, config_hash: str) -> Configuration:
"""Calls "bazel config" with the given config hash.
Args:
config_hash: A config hash as reported by "bazel cquery".
Returns:
The matching configuration or None if no match is found.
Raises:
ValueError: On any parsing problems.
"""
if config_hash == "HOST":
return HostConfiguration()
elif config_hash == "null":
return NullConfiguration()
base_args = ["config", "--output=json"]
(returncode, stdout, stderr) = self.run_bazel(base_args + [config_hash])
if returncode != 0:
raise ValueError("Could not get config: " + stderr)
config_json = json.loads(os.linesep.join(stdout))
fragments = frozendict({
_base_name(entry["name"]):
tuple(_base_name(clazz) for clazz in entry["fragmentOptions"])
for entry in config_json["fragments"]
})
options = frozendict({
_base_name(entry["name"]): frozendict(entry["options"])
for entry in config_json["fragmentOptions"]
})
return Configuration(fragments, options)
# TODO(gregce): have cquery --output=jsonproto support --show_config_fragments
# so we can replace all this regex parsing with JSON reads.
def _parse_cquery_result_line(line: str) -> ConfiguredTarget:
"""Converts a cquery output line to a ConfiguredTarget.
Expected input is:
"<label> (<config hash>) [configFragment1, configFragment2, ...]"
or:
"<label> (null)"
Args:
line: The expected input.
Returns:
Corresponding ConfiguredTarget if the line matches else None.
"""
tokens = line.split(maxsplit=2)
label = tokens[0]
if tokens[1][0] != "(" or tokens[1][-1] != ")":
raise ValueError(f"{tokens[1]} in {line} not surrounded by parentheses")
config_hash = tokens[1][1:-1]
if config_hash == "null":
fragments = ()
else:
if tokens[2][0] != "[" or tokens[2][-1] != "]":
raise ValueError(f"{tokens[2]} in {line} not surrounded by [] brackets")
# The fragments list looks like '[Fragment1, Fragment2, ...]'. Split the
# whole line on ' [' to get just this list, then remove the final ']', then
# split again on ', ' to convert it to a structured tuple.
fragments = tuple(line.split(" [")[1][0:-1].split(", "))
return ConfiguredTarget(
label=label,
config=None, # Not yet available: we'll need `bazel config` to get this.
config_hash=config_hash,
transitive_fragments=fragments)
def _base_name(full_name: str) -> str:
"""Strips a fully qualified Java class name to the file scope.
Examples:
- "A.B.OuterClass" -> "OuterClass"
- "A.B.OuterClass$InnerClass" -> "OuterClass$InnerClass"
Args:
full_name: Fully qualified class name.
Returns:
Stripped name.
"""
return full_name.split(".")[-1]
```
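A minimal usage sketch for the API above, assuming it is run from inside a Bazel workspace; the target label is a placeholder borrowed from the tests below rather than anything this module defines.

```python
# Illustrative only: the label is a placeholder and a Bazel workspace is assumed.
from tools.ctexplain.bazel_api import BazelApi

api = BazelApi()  # defaults to run_bazel_in_client
success, stderr, cts = api.cquery(["deps(//testapp:fg)"])
if not success:
    raise SystemExit(f"cquery failed: {stderr}")
for ct in cts:
    # ct.config is still None at this point; resolve it via "bazel config".
    config = api.get_config(ct.config_hash)
    print(ct.label, ct.config_hash, type(config).__name__)
```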
#### File: tools/ctexplain/bazel_api_test.py
```python
import os
import unittest
from src.test.py.bazel import test_base
from tools.ctexplain.bazel_api import BazelApi
from tools.ctexplain.types import HostConfiguration
from tools.ctexplain.types import NullConfiguration
class BazelApiTest(test_base.TestBase):
_bazel_api: BazelApi = None
def setUp(self):
test_base.TestBase.setUp(self)
self._bazel_api = BazelApi(self.RunBazel)
self.ScratchFile('WORKSPACE')
self.CreateWorkspaceWithDefaultRepos('repo/WORKSPACE')
def tearDown(self):
test_base.TestBase.tearDown(self)
def testBasicCquery(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
res = self._bazel_api.cquery(['//testapp:all'])
success = res[0]
cts = res[2]
self.assertTrue(success)
self.assertEqual(len(cts), 1)
self.assertEqual(cts[0].label, '//testapp:fg')
self.assertIsNone(cts[0].config)
self.assertGreater(len(cts[0].config_hash), 10)
self.assertIn('PlatformConfiguration', cts[0].transitive_fragments)
def testFailedCquery(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
(success, stderr, cts) = self._bazel_api.cquery(['//testapp:typo'])
self.assertFalse(success)
self.assertEqual(len(cts), 0)
self.assertIn("target 'typo' not declared in package 'testapp'",
os.linesep.join(stderr))
def testTransitiveFragmentsAccuracy(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
'filegroup(name = "ccfg", srcs = [":ccbin"])',
'cc_binary(name = "ccbin", srcs = ["ccbin.cc"])'
])
cts1 = self._bazel_api.cquery(['//testapp:fg'])[2]
self.assertNotIn('CppConfiguration', cts1[0].transitive_fragments)
cts2 = self._bazel_api.cquery(['//testapp:ccfg'])[2]
self.assertIn('CppConfiguration', cts2[0].transitive_fragments)
def testGetTargetConfig(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
cts = self._bazel_api.cquery(['//testapp:fg'])[2]
config = self._bazel_api.get_config(cts[0].config_hash)
expected_fragments = ['PlatformConfiguration', 'JavaConfiguration']
for exp in expected_fragments:
self.assertIn(exp, config.fragments.keys())
core_options = config.options['CoreOptions']
self.assertIsNotNone(core_options)
self.assertIn(('stamp', 'false'), core_options.items())
def testGetHostConfig(self):
self.ScratchFile('testapp/BUILD', [
'genrule(',
' name = "g",',
' srcs = [],',
' cmd = "",',
' outs = ["g.out"],',
' tools = [":fg"])',
'filegroup(name = "fg", srcs = ["a.file"])',
])
query = ['//testapp:fg', '--universe_scope=//testapp:g']
cts = self._bazel_api.cquery(query)[2]
config = self._bazel_api.get_config(cts[0].config_hash)
self.assertIsInstance(config, HostConfiguration)
# We don't currently populate or read a host configuration's details.
self.assertEqual(len(config.fragments), 0)
self.assertEqual(len(config.options), 0)
def testGetNullConfig(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
cts = self._bazel_api.cquery(['//testapp:a.file'])[2]
config = self._bazel_api.get_config(cts[0].config_hash)
self.assertIsInstance(config, NullConfiguration)
# Null configurations have no information by definition.
self.assertEqual(len(config.fragments), 0)
self.assertEqual(len(config.options), 0)
def testConfigFragmentsMap(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
cts = self._bazel_api.cquery(['//testapp:fg'])[2]
fragments_map = self._bazel_api.get_config(cts[0].config_hash).fragments
self.assertIn('PlatformOptions', fragments_map['PlatformConfiguration'])
self.assertIn('ShellConfiguration$Options',
fragments_map['ShellConfiguration'])
def testConfigWithDefines(self):
self.ScratchFile('testapp/BUILD', [
'filegroup(name = "fg", srcs = ["a.file"])',
])
cquery_args = ['//testapp:fg', '--define', 'a=b']
cts = self._bazel_api.cquery(cquery_args)[2]
config = self._bazel_api.get_config(cts[0].config_hash)
user_defined_options = config.options['user-defined']
self.assertIsNotNone(user_defined_options)
self.assertDictEqual(user_defined_options._dict, {'--define:a': 'b'})
def testConfigWithStarlarkFlags(self):
self.ScratchFile('testapp/defs.bzl', [
'def _flag_impl(settings, attr):', ' pass', 'string_flag = rule(',
' implementation = _flag_impl,',
' build_setting = config.string(flag = True)'
')'
])
self.ScratchFile('testapp/BUILD', [
'load(":defs.bzl", "string_flag")',
'string_flag(name = "my_flag", build_setting_default = "nada")',
'filegroup(name = "fg", srcs = ["a.file"])',
])
cquery_args = ['//testapp:fg', '--//testapp:my_flag', 'algo']
cts = self._bazel_api.cquery(cquery_args)[2]
config = self._bazel_api.get_config(cts[0].config_hash)
user_defined_options = config.options['user-defined']
self.assertIsNotNone(user_defined_options)
self.assertDictEqual(user_defined_options._dict,
{'//testapp:my_flag': 'algo'})
if __name__ == '__main__':
unittest.main()
```
#### File: tools/ctexplain/lib.py
```python
from typing import Tuple
# Do not edit this line. Copybara replaces it with PY2 migration helper..third_party.bazel.tools.ctexplain.bazel_api as bazel_api
from tools.ctexplain.types import ConfiguredTarget
def analyze_build(bazel: bazel_api.BazelApi, labels: Tuple[str, ...],
build_flags: Tuple[str, ...]) -> Tuple[ConfiguredTarget, ...]:
"""Gets a build invocation's configured targets.
Args:
bazel: API for invoking Bazel.
labels: The targets to build.
build_flags: The build flags to use.
Returns:
Configured targets representing the build.
Raises:
RuntimeError: On any invocation errors.
"""
cquery_args = [f'deps({",".join(labels)})']
cquery_args.extend(build_flags)
(success, stderr, cts) = bazel.cquery(cquery_args)
if not success:
raise RuntimeError("invocation failed: " + stderr.decode("utf-8"))
# We have to do separate calls to "bazel config" to get the actual configs
# from their hashes.
hashes_to_configs = {}
cts_with_configs = []
for ct in cts:
# Don't use dict.setdefault because that unconditionally calls get_config
# as one of its parameters and that's an expensive operation to waste.
if ct.config_hash not in hashes_to_configs:
hashes_to_configs[ct.config_hash] = bazel.get_config(ct.config_hash)
config = hashes_to_configs[ct.config_hash]
cts_with_configs.append(
ConfiguredTarget(ct.label, config, ct.config_hash,
ct.transitive_fragments))
return tuple(cts_with_configs)
```
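A sketch of driving `analyze_build` directly; the label and flags are placeholders, and the import paths are assumed from the file locations above.

```python
# Illustrative driver for analyze_build(); label and flags are placeholders.
from tools.ctexplain.bazel_api import BazelApi
from tools.ctexplain.lib import analyze_build

cts = analyze_build(
    bazel=BazelApi(),
    labels=("//testapp:fg",),
    build_flags=("--define", "a=b"),
)
distinct_configs = {ct.config_hash for ct in cts}
print(f"{len(cts)} configured targets across {len(distinct_configs)} configurations")
```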
#### File: tools/distributions/system_repo.bzl
```python
def _system_repo_impl(ctx):
symlinks = ctx.attr.symlinks
for link in symlinks:
target = symlinks[link]
ctx.symlink(target, link)
ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name = ctx.name))
ctx.file("BUILD.bazel", ctx.read(ctx.attr.build_file))
system_repo = repository_rule(
implementation = _system_repo_impl,
attrs = {
"symlinks": attr.string_dict(
doc = """
Symlinks to create for this system repo. The key is the link path under this repo,
the value should be an absolute target path on the system that we want to link.
""",
),
"build_file": attr.label(
allow_single_file = True,
mandatory = True,
doc = "The file to use as the BUILD file for this repository.",
),
},
doc = "A repository rule for providing system libraries for Bazel build",
)
```
#### File: tools/sh/sh_configure.bzl
```python
def _is_windows(repository_ctx):
"""Returns true if the host OS is Windows."""
return repository_ctx.os.name.startswith("windows")
def _sh_config_impl(repository_ctx):
"""sh_config rule implementation.
Detects the path of the shell interpreter on the local machine and
stores it in a sh_toolchain rule.
Args:
repository_ctx: the repository rule context object
"""
sh_path = repository_ctx.os.environ.get("BAZEL_SH")
if not sh_path:
if _is_windows(repository_ctx):
sh_path = repository_ctx.which("bash.exe")
if sh_path:
# repository_ctx.which returns a path object, convert that to
# string so we can call string.startswith on it.
sh_path = str(sh_path)
# When the Windows Subsystem for Linux is installed there's a
# bash.exe under %WINDIR%\system32\bash.exe that launches Ubuntu
# Bash which cannot run native Windows programs so it's not what
# we want.
windir = repository_ctx.os.environ.get("WINDIR")
if windir and sh_path.startswith(windir):
sh_path = None
else:
sh_path = repository_ctx.which("bash")
if not sh_path:
sh_path = repository_ctx.which("sh")
if not sh_path:
sh_path = ""
if sh_path and _is_windows(repository_ctx):
sh_path = sh_path.replace("\\", "/")
repository_ctx.file("BUILD", """
load("@bazel_tools//tools/sh:sh_toolchain.bzl", "sh_toolchain")
sh_toolchain(
name = "local_sh",
path = "{sh_path}",
visibility = ["//visibility:public"],
)
toolchain(
name = "local_sh_toolchain",
toolchain = ":local_sh",
toolchain_type = "@bazel_tools//tools/sh:toolchain_type",
)
""".format(sh_path = sh_path))
sh_config = repository_rule(
environ = [
"WINDIR",
"PATH",
],
local = True,
implementation = _sh_config_impl,
)
def sh_configure():
"""Detect the local shell interpreter and register its toolchain."""
sh_config(name = "local_config_sh")
native.register_toolchains("@local_config_sh//:local_sh_toolchain")
``` |
{
"source": "jobeckham7/pear-admin-flask",
"score": 3
} |
#### File: common/script/initdb.py
```python
from dotenv import dotenv_values
import sqlparse
import pymysql
config = dotenv_values('.env')
# MySQL connection settings
HOST = config.get('MYSQL_HOST') or '127.0.0.1'
PORT = config.get('MYSQL_PORT') or 3306
DATABASE = config.get('MYSQL_DATABASE') or 'PearAdminFlask'
USERNAME = config.get('MYSQL_USERNAME') or 'root'
PASSWORD = config.get('MYSQL_PASSWORD') or '<PASSWORD>'
def is_exist_database():
db = pymysql.connect(host=HOST, port=int(PORT), user=USERNAME, password=PASSWORD, charset='utf8mb4')
cursor1 = db.cursor()
sql = "select * from information_schema.SCHEMATA WHERE SCHEMA_NAME = '%s' ; " % DATABASE
res = cursor1.execute(sql)
db.close()
return res
def init_database():
db = pymysql.connect(host=HOST, port=int(PORT), user=USERNAME, password=PASSWORD, charset='utf8mb4')
cursor1 = db.cursor()
sql = "CREATE DATABASE IF NOT EXISTS %s" % DATABASE
res = cursor1.execute(sql)
db.close()
return res
def execute_fromfile(filename):
db = pymysql.connect(host=HOST, port=int(PORT), user=USERNAME, password=PASSWORD, database=DATABASE,
charset='utf8mb4')
fd = open(filename, 'r', encoding='utf-8')
cursor = db.cursor()
sqlfile = fd.read()
sqlfile = sqlparse.format(sqlfile, strip_comments=True).strip()
    sql_commands = sqlfile.split(';')
    for command in sql_commands:
try:
cursor.execute(command)
db.commit()
except Exception as msg:
db.rollback()
db.close()
def init_db():
if is_exist_database():
print('数据库已经存在,为防止误初始化,请手动删除 %s 数据库' % str(DATABASE))
return
if init_database():
print('数据库%s创建成功' % str(DATABASE))
execute_fromfile('test/pear.sql')
print('表创建成功')
print('欢迎使用pear-admin-flask,请使用 flask run 命令启动程序')
```
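For context, a sketch of how this script might be wired up. The `.env` keys are the ones read above; the values and the import path are placeholders.

```python
# Assumed .env contents (values are placeholders):
#   MYSQL_HOST=127.0.0.1
#   MYSQL_PORT=3306
#   MYSQL_DATABASE=PearAdminFlask
#   MYSQL_USERNAME=root
#   MYSQL_PASSWORD=...
#
# The import path below is a guess based on the file location.
from common.script.initdb import init_db

if __name__ == "__main__":
    init_db()  # creates the database and loads test/pear.sql when it does not exist yet
```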
#### File: applications/schemas/admin_user.py
```python
from applications.extensions import ma
from marshmallow import fields
from applications.models import Dept
# Serialization schema for the user model
class UserSchema(ma.Schema):
id = fields.Integer()
username = fields.Str()
realname = fields.Str()
enable = fields.Integer()
create_at = fields.DateTime()
update_at = fields.DateTime()
dept = fields.Method("get_dept")
def get_dept(self, obj):
if obj.dept_id != None:
return Dept.query.filter_by(id=obj.dept_id).first().dept_name
else:
return None
```
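A short serialization sketch, assuming a `User` model exists in `applications.models` (not shown in this excerpt) and that the code runs inside an application context; the exact return shape depends on the marshmallow version.

```python
# Sketch only: the User import location is an assumption, and an app context is required.
from applications.models import User
from applications.schemas import UserSchema

users = User.query.limit(10).all()
payload = UserSchema(many=True).dump(users)  # list of dicts on marshmallow 3
# each dict carries the resolved department name under the "dept" key
```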
#### File: view/admin/dict.py
```python
from flask import Blueprint, render_template, request, jsonify
from applications.common import curd
from applications.common.helper import ModelFilter
from applications.common.utils.http import table_api, success_api, fail_api
from applications.common.utils.rights import authorize
from applications.common.utils.validate import xss_escape
from applications.extensions import db
from applications.models import DictType, DictData
from applications.schemas import DictTypeSchema, DictDataSchema
admin_dict = Blueprint('adminDict', __name__, url_prefix='/admin/dict')
# Data dictionary
@admin_dict.get('/')
@authorize("admin:dict:main", log=True)
def main():
return render_template('admin/dict/main.html')
@admin_dict.get('/dictType/data')
@authorize("admin:dict:main", log=True)
def dict_type_data():
    # Read request parameters
type_name = xss_escape(request.args.get('typeName', type=str))
    # Build the query filter
mf = ModelFilter()
if type_name:
mf.vague(field_name="type_name", value=type_name)
    # ORM query
    # layui_paginate() returns a pagination object; use .items for the rows
dict_all = DictType.query.filter(mf.get_filter(DictType)).layui_paginate()
count = dict_all.total
data = curd.model_to_dicts(schema=DictTypeSchema, data=dict_all.items)
return table_api(data=data, count=count)
@admin_dict.get('/dictType/add')
@authorize("admin:dict:add", log=True)
def dict_type_add():
return render_template('admin/dict/add.html')
@admin_dict.post('/dictType/save')
@authorize("admin:dict:add", log=True)
def dict_type_save():
req_json = request.json
description = xss_escape(req_json.get("description"))
enable = xss_escape(req_json.get("enable"))
type_code = xss_escape(req_json.get("typeCode"))
type_name = xss_escape(req_json.get("typeName"))
d = DictType(type_name=type_name, type_code=type_code, enable=enable, description=description)
db.session.add(d)
db.session.commit()
if d.id is None:
return fail_api(msg="增加失败")
return success_api(msg="增加成功")
# Edit dictionary type
@admin_dict.get('/dictType/edit')
@authorize("admin:dict:edit", log=True)
def dict_type_edit():
_id = request.args.get('dictTypeId', type=int)
dict_type = DictType.query.filter_by(id=_id).first()
return render_template('admin/dict/edit.html', dict_type=dict_type)
# Edit dictionary type
@admin_dict.put('/dictType/update')
@authorize("admin:dict:edit", log=True)
def dict_type_update():
req_json = request.json
id = xss_escape(req_json.get("id"))
description = xss_escape(req_json.get("description"))
enable = xss_escape(req_json.get("enable"))
type_code = xss_escape(req_json.get("typeCode"))
type_name = xss_escape(req_json.get("typeName"))
DictType.query.filter_by(id=id).update({
"description": description,
"enable": enable,
"type_code": type_code,
"type_name": type_name
})
db.session.commit()
return success_api(msg="更新成功")
# Enable dictionary type
@admin_dict.put('/dictType/enable')
@authorize("admin:dict:edit", log=True)
def dict_type_enable():
_id = request.json.get('id')
    if _id:
res = curd.enable_status(DictType,_id)
if not res:
return fail_api(msg="出错啦")
return success_api("启动成功")
return fail_api(msg="数据错误")
# Disable dictionary type
@admin_dict.put('/dictType/disable')
@authorize("admin:dict:edit", log=True)
def dict_type_dis_enable():
_id = request.json.get('id')
    if _id:
res = curd.disable_status(DictType,_id)
if not res:
return fail_api(msg="出错啦")
return success_api("禁用成功")
return fail_api(msg="数据错误")
# Delete dictionary type
@admin_dict.delete('/dictType/remove/<int:_id>')
@authorize("admin:dict:remove", log=True)
def dict_type_delete(_id):
res = curd.delete_one_by_id(DictType,_id)
if not res:
return fail_api(msg="删除失败")
return success_api(msg="删除成功")
@admin_dict.get('/dictData/data')
@authorize("admin:dict:main", log=True)
def dict_code_data():
type_code = xss_escape(request.args.get('typeCode', type=str))
dict_data = DictData.query.filter_by(type_code=type_code).layui_paginate()
count = dict_data.total
data = curd.model_to_dicts(schema=DictDataSchema, data=dict_data.items)
return table_api(data=data, count=count)
# Add dictionary data
@admin_dict.get('/dictData/add')
@authorize("admin:dict:add", log=True)
def dict_data_add():
type_code = request.args.get('typeCode', type=str)
return render_template('admin/dict/data/add.html', type_code=type_code)
# Add dictionary data
@admin_dict.post('/dictData/save')
@authorize("admin:dict:add", log=True)
def dict_data_save():
req_json = request.json
data_label = xss_escape(req_json.get("dataLabel"))
data_value = xss_escape(req_json.get("dataValue"))
enable = xss_escape(req_json.get("enable"))
remark = xss_escape(req_json.get("remark"))
type_code = xss_escape(req_json.get("typeCode"))
d = DictData(data_label=data_label, data_value=data_value, enable=enable, remark=remark, type_code=type_code)
db.session.add(d)
db.session.commit()
if not d.id:
return jsonify(success=False, msg="增加失败")
return jsonify(success=True, msg="增加成功")
# Edit dictionary data
@admin_dict.get('/dictData/edit')
@authorize("admin:dict:edit", log=True)
def dict_data_edit():
_id = request.args.get('dataId', type=str)
dict_data = curd.get_one_by_id(DictData, _id)
return render_template('admin/dict/data/edit.html', dict_data=dict_data)
# Edit dictionary data
@admin_dict.put('/dictData/update')
@authorize("admin:dict:edit", log=True)
def dict_data_update():
req_json = request.json
id = req_json.get("dataId")
DictData.query.filter_by(id=id).update({
"data_label": xss_escape(req_json.get("dataLabel")),
"data_value": xss_escape(req_json.get("dataValue")),
"enable": xss_escape(req_json.get("enable")),
"remark": xss_escape(req_json.get("remark")),
"type_code": xss_escape(req_json.get("typeCode"))
})
db.session.commit()
return success_api(msg="更新成功")
# Enable dictionary data
@admin_dict.put('/dictData/enable')
@authorize("admin:dict:edit", log=True)
def dict_data_enable():
_id = request.json.get('dataId')
if _id:
res = curd.enable_status(model=DictData, id=_id)
if not res:
return fail_api(msg="出错啦")
return success_api(msg="启动成功")
return fail_api(msg="数据错误")
# Disable dictionary data
@admin_dict.put('/dictData/disable')
@authorize("admin:dict:edit", log=True)
def dict_data_disenable():
_id = request.json.get('dataId')
if _id:
res = curd.disable_status(model=DictData, id=_id)
if not res:
return fail_api(msg="出错啦")
return success_api(msg="禁用成功")
return fail_api(msg="数据错误")
# Delete dictionary data
@admin_dict.delete('/dictData/remove/<int:id>')
@authorize("admin:dict:remove", log=True)
def dict_data_delete(id):
res = curd.delete_one_by_id(model=DictData, id=id)
if not res:
return fail_api(msg="删除失败")
return success_api(msg="删除成功")
``` |
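An HTTP-level sketch of hitting the dictType/save endpoint defined above. The base URL is a placeholder and the session is assumed to already hold a logged-in cookie, since every route is wrapped in `authorize`.

```python
# Hypothetical client call; host and session handling are placeholders.
import requests

BASE = "http://127.0.0.1:5000"
session = requests.Session()  # assumed to already carry an authenticated session cookie

resp = session.post(f"{BASE}/admin/dict/dictType/save", json={
    "typeName": "gender",
    "typeCode": "sys_user_sex",
    "enable": 1,
    "description": "demo entry",
})
print(resp.json())  # success_api / fail_api style payload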
{
"source": "jobec/rfc5424-logging-handler",
"score": 2
} |
#### File: rfc5424-logging-handler/rfc5424logging/transport.py
```python
import io
import socket
import ssl
import sys
if sys.version_info.major == 3:
text_stream_types = io.TextIOBase
bytes_stream_types = io.BufferedIOBase
else:
text_stream_types = io.TextIOBase
bytes_stream_types = io.BufferedIOBase, file # noqa: F821
SYSLOG_PORT = 514
# RFC6587 framing
FRAMING_OCTET_COUNTING = 1
FRAMING_NON_TRANSPARENT = 2
class TCPSocketTransport:
def __init__(self, address, timeout, framing):
self.socket = None
self.address = address
self.timeout = timeout
self.framing = framing
self.open()
def open(self):
error = None
host, port = self.address
addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
if not addrinfo:
raise OSError("getaddrinfo returns an empty list")
for entry in addrinfo:
family, socktype, _, _, sockaddr = entry
try:
self.socket = socket.socket(family, socktype)
self.socket.settimeout(self.timeout)
self.socket.connect(sockaddr)
# Connected successfully. Erase any previous errors.
error = None
break
except OSError as e:
error = e
if self.socket is not None:
self.socket.close()
if error is not None:
raise error
def transmit(self, syslog_msg):
# RFC6587 framing
if self.framing == FRAMING_NON_TRANSPARENT:
syslog_msg = syslog_msg.replace(b"\n", b"\\n")
syslog_msg = b"".join((syslog_msg, b"\n"))
else:
syslog_msg = b" ".join((str(len(syslog_msg)).encode("ascii"), syslog_msg))
try:
self.socket.sendall(syslog_msg)
except (OSError, IOError):
self.close()
self.open()
self.socket.sendall(syslog_msg)
def close(self):
self.socket.close()
class TLSSocketTransport(TCPSocketTransport):
def __init__(
self,
address,
timeout,
framing,
tls_ca_bundle,
tls_verify,
tls_client_cert,
tls_client_key,
tls_key_password,
):
self.tls_ca_bundle = tls_ca_bundle
self.tls_verify = tls_verify
self.tls_client_cert = tls_client_cert
self.tls_client_key = tls_client_key
self.tls_key_password = tls_key_password
super(TLSSocketTransport, self).__init__(address, timeout, framing=framing)
def open(self):
super(TLSSocketTransport, self).open()
context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH, cafile=self.tls_ca_bundle
)
context.verify_mode = ssl.CERT_REQUIRED if self.tls_verify else ssl.CERT_NONE
server_hostname, _ = self.address
if self.tls_client_cert:
context.load_cert_chain(
self.tls_client_cert, self.tls_client_key, self.tls_key_password
)
self.socket = context.wrap_socket(self.socket, server_hostname=server_hostname)
class UDPSocketTransport:
def __init__(self, address, timeout):
self.socket = None
self.address = address
self.timeout = timeout
self.open()
def open(self):
error = None
host, port = self.address
addrinfo = socket.getaddrinfo(host, port, 0, socket.SOCK_DGRAM)
if not addrinfo:
raise OSError("getaddrinfo returns an empty list")
for entry in addrinfo:
family, socktype, _, _, sockaddr = entry
try:
self.socket = socket.socket(family, socktype)
self.socket.settimeout(self.timeout)
self.address = sockaddr
break
except OSError as e:
error = e
if self.socket is not None:
self.socket.close()
if error is not None:
raise error
def transmit(self, syslog_msg):
try:
self.socket.sendto(syslog_msg, self.address)
except (OSError, IOError):
self.close()
self.open()
self.socket.sendto(syslog_msg, self.address)
def close(self):
self.socket.close()
class UnixSocketTransport:
def __init__(self, address, socket_type):
self.socket = None
self.address = address
self.socket_type = socket_type
self.open()
def open(self):
if self.socket_type is None:
socket_types = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
else:
socket_types = [self.socket_type]
for type_ in socket_types:
# Syslog server may be unavailable during handler initialisation.
# So we ignore connection errors
try:
self.socket = socket.socket(socket.AF_UNIX, type_)
self.socket.connect(self.address)
self.socket_type = type_
break
except OSError:
if self.socket is not None:
self.socket.close()
def transmit(self, syslog_msg):
try:
self.socket.send(syslog_msg)
except (OSError, IOError):
self.close()
self.open()
self.socket.send(syslog_msg)
def close(self):
self.socket.close()
class StreamTransport:
def __init__(self, stream):
if isinstance(stream, text_stream_types):
self.text_mode = True
elif isinstance(stream, bytes_stream_types):
self.text_mode = False
else:
raise ValueError("Stream is not of a valid stream type")
if not stream.writable():
raise ValueError("Stream is not a writeable stream")
self.stream = stream
def transmit(self, syslog_msg):
syslog_msg = syslog_msg + b"\n"
if self.text_mode:
syslog_msg = syslog_msg.decode(self.stream.encoding, "replace")
self.stream.write(syslog_msg)
def close(self):
# Closing the stream is left up to the user.
pass
``` |
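A small sketch of using the TCP transport on its own, assuming a syslog receiver is listening on the placeholder address; normally the logging handler in this package builds the message and drives the transport.

```python
# Sketch: one message over TCP with octet-counting framing.
# Import path assumed from the file location; address and payload are placeholders.
from rfc5424logging.transport import (
    FRAMING_OCTET_COUNTING,
    SYSLOG_PORT,
    TCPSocketTransport,
)

transport = TCPSocketTransport(("127.0.0.1", SYSLOG_PORT), timeout=5,
                               framing=FRAMING_OCTET_COUNTING)
try:
    transport.transmit(b"<14>1 2021-01-01T00:00:00Z myhost myapp - - - hello")
finally:
    transport.close()
```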
{
"source": "jobedylbas/querido-diario",
"score": 3
} |
#### File: gazette/spiders/rj_rio_de_janeiro.py
```python
import datetime
import scrapy
from dateutil.rrule import DAILY, rrule
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class RjRioDeJaneiroSpider(BaseGazetteSpider):
TERRITORY_ID = "3304557"
name = "rj_rio_de_janeiro"
allowed_domains = ["doweb.rio.rj.gov.br"]
start_date = datetime.date(2006, 3, 16)
def start_requests(self):
for date in rrule(
freq=DAILY, dtstart=self.start_date, until=datetime.date.today()
):
day = str(date.day).zfill(2)
month = str(date.month).zfill(2)
url = f"https://doweb.rio.rj.gov.br/apifront/portal/edicoes/edicoes_from_data/{date.year}-{month}-{day}.json"
yield scrapy.Request(url=url, cb_kwargs={"gazette_date": date.date()})
def parse(self, response, gazette_date):
gazette_data = response.json()
if gazette_data["erro"]:
return
items = gazette_data.get("itens", [])
for item in items:
gazette_id = item["id"]
gazette_url = (
f"https://doweb.rio.rj.gov.br/portal/edicoes/download/{gazette_id}"
)
is_extra_edition = item["suplemento"] == 1
yield Gazette(
date=gazette_date,
file_urls=[gazette_url],
is_extra_edition=is_extra_edition,
power="executive",
)
```
#### File: gazette/spiders/rr_boa_vista.py
```python
import scrapy
import w3lib.url
from dateparser import parse
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class RrBoaVistaSpider(BaseGazetteSpider):
TERRITORY_ID = "1400100"
name = "rr_boa_vista"
allowed_domains = ["boavista.rr.gov.br"]
start_urls = ["https://www.boavista.rr.gov.br/diario-oficial"]
def parse(self, response):
options = response.xpath('//*[@id="Periodo"]/optgroup/option/@value')
for option in options:
data = option.extract()
url = w3lib.url.add_or_replace_parameter(response.url, "Periodo", data)
yield scrapy.Request(url, self.parse_period)
def parse_period(self, response):
div_list = response.xpath('//*[@class="bldownload"]')
for div in div_list:
content = div.xpath("./div/text()").extract()
date = parse(content[1], languages=["pt"]).date()
url = div.xpath("./a/@href").extract_first()
url = response.urljoin(url)
power = "executive_legislature"
yield Gazette(
date=date, file_urls=[url], is_extra_edition=False, power=power,
)
```
#### File: gazette/spiders/sc_florianopolis.py
```python
import re
from datetime import date
from dateparser import parse
from dateutil.rrule import MONTHLY, rrule
from scrapy import FormRequest
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class ScFlorianopolisSpider(BaseGazetteSpider):
name = "sc_florianopolis"
TERRITORY_ID = "4205407"
start_date = date(2009, 6, 1)
def start_requests(self):
end_date = date.today()
periods_of_interest = [
(date.year, date.month)
for date in rrule(freq=MONTHLY, dtstart=self.start_date, until=end_date)
]
for year, month in periods_of_interest:
data = dict(ano=str(year), mes=str(month), passo="1", enviar="")
yield FormRequest(
"http://www.pmf.sc.gov.br/governo/index.php?pagina=govdiariooficial",
formdata=data,
)
def parse(self, response):
for link in response.css("ul.listagem li a"):
url = self.get_pdf_url(response, link)
if not url:
continue
gazette_date = self.get_date(link)
gazette_edition_number = link.css("::attr(title)").re_first(r"Edição (\d+)")
yield Gazette(
date=gazette_date,
edition_number=gazette_edition_number,
file_urls=(url,),
is_extra_edition=self.is_extra(link),
power="executive_legislature",
)
@staticmethod
def get_pdf_url(response, link):
relative_url = link.css("::attr(href)").extract_first()
if not relative_url.lower().endswith(".pdf"):
return None
return response.urljoin(relative_url)
@staticmethod
def get_date(link):
text = " ".join(link.css("::text").extract())
pattern = r"\d{1,2}\s+de\s+\w+\s+de\s+\d{4}"
match = re.search(pattern, text)
if not match:
return None
return parse(match.group(), languages=("pt",)).date()
@staticmethod
def is_extra(link):
text = " ".join(link.css("::text").extract())
return "extra" in text.lower()
``` |
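The date extraction in `get_date` boils down to a regex plus dateparser; here is a standalone illustration with a made-up link text.

```python
# Standalone illustration of the parsing done in get_date(); the text is made up.
import re
from dateparser import parse

text = "Edição 1234 - 12 de junho de 2015"
match = re.search(r"\d{1,2}\s+de\s+\w+\s+de\s+\d{4}", text)
if match:
    print(parse(match.group(), languages=("pt",)).date())  # 2015-06-12
```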
{
"source": "Jobegiar99/Garden-Palooza",
"score": 3
} |
#### File: game/levelGenerator/grassDecorator.py
```python
from .orientationDictionary import OrientationDictionary
from copy import deepcopy
class GrassDecorator:
def updateCells(
self,
matrix,
spriteIndexes,
createdPath,
groundList,
GRASS_SPRITE_INDEX,
GROUND_SPRITE_INDEX,
):
orientationDictionary = OrientationDictionary(spriteIndexes)
spriteDict = orientationDictionary.spriteDict
cornerDict = orientationDictionary.cornerDict
tempMatrix = deepcopy(matrix)
for cell in createdPath:
cellInfo = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
self.fillCellInfo(cellInfo, cell, matrix, GROUND_SPRITE_INDEX)
self.updateSprite(
cellInfo, spriteDict, cell[0], cell[1], tempMatrix, GRASS_SPRITE_INDEX
)
self.groundSpriteUpdate(
cell, matrix, cornerDict, tempMatrix, cellInfo, GROUND_SPRITE_INDEX
)
self.checkDiagonals(cellInfo, cell, matrix, GROUND_SPRITE_INDEX)
for info in cornerDict:
if cellInfo in info:
tempMatrix[cell[0]][cell[1]] = info[1]
return tempMatrix
def fillCellInfo(self, cellInfo, cell, matrix, spriteToCheck):
row, col = cell
if (
0 <= row - 1 < len(matrix)
and 0 <= col < len(matrix[0])
and matrix[row - 1][col] == spriteToCheck
):
cellInfo[0][1] = 1
if (
0 <= row + 1 < len(matrix)
and 0 <= col < len(matrix[0])
and matrix[row + 1][col] == spriteToCheck
):
cellInfo[2][1] = 1
if (
0 <= row < len(matrix)
and 0 <= col + 1 < len(matrix[0])
and matrix[row][col + 1] == spriteToCheck
):
cellInfo[1][2] = 1
if (
0 <= row < len(matrix)
and 0 <= col - 1 < len(matrix[0])
and matrix[row][col - 1] == spriteToCheck
):
cellInfo[1][0] = 1
def checkDiagonals(self, cellInfo, cell, matrix, spriteToCheck):
row, col = cell
if (
0 <= row - 1 < len(matrix)
and 0 <= col - 1 < len(matrix[0])
and matrix[row - 1][col - 1] == spriteToCheck
):
cellInfo[0][0] = 1
if (
0 <= row - 1 < len(matrix)
and 0 <= col + 1 < len(matrix[0])
and matrix[row - 1][col + 1] == spriteToCheck
):
cellInfo[0][2] = 1
if (
0 <= row + 1 < len(matrix)
and 0 <= col + 1 < len(matrix[0])
and matrix[row + 1][col + 1] == spriteToCheck
):
cellInfo[2][2] = 1
if (
0 <= row + 1 < len(matrix)
and 0 <= col - 1 < len(matrix[0])
and matrix[row + 1][col - 1] == spriteToCheck
):
cellInfo[2][0] = 1
def updateSprite(
self, cellInfo, spriteDict, row, column, tempMatrix, defaultSprite
):
found = False
for spriteInfo in spriteDict:
if cellInfo in spriteInfo[0]:
tempMatrix[row][column] = spriteInfo[1]
found = True
if not found:
tempMatrix[row][column] = defaultSprite
def groundSpriteUpdate(
self, cell, matrix, cornerDict, tempMatrix, cellInfo, GROUND_SPRITE_INDEX
):
row, column = cell
if cellInfo == cornerDict[0][0]:
if (
row + 1 < len(matrix)
and column + 1 < len(matrix[0])
and matrix[row + 1][column - 1] == GROUND_SPRITE_INDEX
):
if (
matrix[row][column + 1] == GROUND_SPRITE_INDEX
and matrix[row + 1][column + 1] == GROUND_SPRITE_INDEX
):
tempMatrix[row + 1][column + 1] = cornerDict[0][1]
        if cellInfo == cornerDict[1][0]:
if (
row + 1 < len(matrix)
and 0 <= column - 1
and matrix[row + 1][column - 1] == GROUND_SPRITE_INDEX
):
if (
matrix[row][column - 1] == GROUND_SPRITE_INDEX
and matrix[row + 1][column - 1] == GROUND_SPRITE_INDEX
):
tempMatrix[row + 1][column - 1] = cornerDict[1][1]
if cellInfo == cornerDict[2][0]:
if (
0 <= row - 1 < len(matrix)
and 0 <= column - 1
and matrix[row - 1][column - 1] == GROUND_SPRITE_INDEX
):
if (
matrix[row][column - 1] == GROUND_SPRITE_INDEX
and matrix[row - 1][column - 1] == GROUND_SPRITE_INDEX
):
tempMatrix[row - 1][column - 1] = cornerDict[2][1]
if cellInfo == cornerDict[3][0]:
if (
0 <= row - 1 < len(matrix)
and column + 1 < len(matrix[0])
and matrix[row - 1][column - 1] == GROUND_SPRITE_INDEX
):
if (
matrix[row - 1][column + 1] == GROUND_SPRITE_INDEX
and matrix[row][column + 1] == GROUND_SPRITE_INDEX
):
                    tempMatrix[row - 1][column + 1] = cornerDict[3][1]
```
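To make the neighbour bookkeeping above concrete, a tiny sketch of `fillCellInfo` on a made-up 3x3 matrix (2363 stands in for the ground sprite index used elsewhere in this package; the import path is assumed from the file header).

```python
# Illustration only: matrix values and import path are placeholders.
from game.levelGenerator.grassDecorator import GrassDecorator

matrix = [
    [0,    2363, 0],
    [2363, 0,    0],
    [0,    0,    0],
]
cell_info = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
GrassDecorator().fillCellInfo(cell_info, (1, 1), matrix, 2363)
print(cell_info)  # [[0, 1, 0], [1, 1, 0], [0, 0, 0]] -> ground above and to the left
```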
#### File: game/levelGenerator/levelGenerator.py
```python
from .skeletonGenerator import SkeletonGenerator
from .grassDecorator import GrassDecorator
from .riverSkeletonGenerator import RiverSkeletonGenerator
from .riverDecorator import RiverDecorator
from .groundDecorator import GroundDecorator
from .waterExtraDecorator import WaterExtraDecorator
from .grassExtraDecorator import GrassExtraDecorator
from .groundExtraDecorator import GroundExtraDecorator
from copy import deepcopy
import random
def GenerateLevel():
GROUND_SPRITE_INDEX = 2363
GRASS_SPRITE_INDEX = 2420
WATER_SPRITE_INDEX = 2717
# up,down,left,right,upperCornerLeft, upperCornerRight, lowerCornerRight, lowerCornerLeft
# edge cases sprites: upperCornerLeft, upperCornerRight, lowerCornerRight, lowerCornerLeft
GRASS_SPRITE_MAP = [
2361,
2479,
2419,
2421,
2480,
2478,
2362,
2360,
2422,
2423,
2482,
2481,
]
# up,down,left,right,upperCornerLeft, upperCornerRight, lowerCornerRight, lowerCornerLeft
# edge cases sprites: upperCornerLeft, upperCornerRight, lowerCornerRight, lowerCornerLeft
WATER_SPRITE_MAP = [
2715,
2833,
2773,
2775,
2834,
2832,
2716,
2714,
2776,
2777,
2836,
2835,
]
GROUND_SPRITE_MAP = [GROUND_SPRITE_INDEX, 2483, 2484, 2485]
WATER_EXTRA_SPRITE_MAP = [2896, 2897, 2898, 2899]
GRASS_EXTRA_SPRITE_MAP = [2609, 2610, 2611]
GRASS_EXTRA_SPRITE_MAP2 = [2186, 2187, 2306, 2307, 2308, 2309, 2310]
GROUND_EXTRA_SPRITE_MAP = [2491, 2550]
levelSkeleton = SkeletonGenerator(GROUND_SPRITE_INDEX, GRASS_SPRITE_INDEX)
firstLayer = levelSkeleton.matrix
firstLayer = GrassDecorator().updateCells(
firstLayer,
GRASS_SPRITE_MAP,
levelSkeleton.createdPath,
levelSkeleton.ground,
GRASS_SPRITE_INDEX,
GROUND_SPRITE_INDEX,
)
waterSkeleton = RiverSkeletonGenerator(
firstLayer, WATER_SPRITE_INDEX, GROUND_SPRITE_INDEX
)
if len(waterSkeleton.waterSpot) > 0:
firstLayer = RiverDecorator().updateCells(
firstLayer,
WATER_SPRITE_INDEX,
waterSkeleton.waterSpot,
GROUND_SPRITE_INDEX,
WATER_SPRITE_MAP,
)
WaterExtraDecorator(firstLayer, waterSkeleton.waterSpot, WATER_EXTRA_SPRITE_MAP)
GroundDecorator(firstLayer, GROUND_SPRITE_MAP)
secondLayer = deepcopy(firstLayer)
GrassExtraDecorator(
secondLayer,
GRASS_EXTRA_SPRITE_MAP,
GRASS_EXTRA_SPRITE_MAP2,
levelSkeleton.createdPath,
GRASS_SPRITE_INDEX,
)
GroundExtraDecorator(
secondLayer, GROUND_EXTRA_SPRITE_MAP, levelSkeleton.ground, GROUND_SPRITE_MAP
)
spawnPoint = levelSkeleton.createdPath[
random.randint(0, len(levelSkeleton.createdPath) - 1)
]
return {
"firstLayer": firstLayer,
"secondLayer": secondLayer,
"playerRow": spawnPoint[1] * 16,
"playerColumn": spawnPoint[0] * 16,
}
```
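A sketch of consuming the generator's output dictionary; the import path is assumed from the file header.

```python
# Sketch: the keys below are exactly what GenerateLevel() returns.
from game.levelGenerator.levelGenerator import GenerateLevel

level = GenerateLevel()
first = level["firstLayer"]
print(len(first), "rows x", len(first[0]), "columns of tile indices")
print("player spawn (pixels):", level["playerRow"], level["playerColumn"])  # 16 px per tile
```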
#### File: game/levelGenerator/orientationDictionary.py
```python
class OrientationDictionary:
def __init__(self, spriteIndexes):
up = [[[0, 1, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, 1, 1], [0, 0, 0]]]
down = [[[0, 0, 0], [0, 1, 0], [0, 1, 0]], [[0, 0, 0], [1, 1, 1], [0, 1, 0]]]
left = [[[0, 0, 0], [1, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, 1, 0], [0, 1, 0]]]
right = [[[0, 0, 0], [0, 1, 1], [0, 0, 0]], [[0, 1, 0], [0, 1, 1], [0, 1, 0]]]
upperCornerLeft = [[[0, 0, 0], [0, 1, 1], [0, 1, 0]]]
upperCornerRight = [[[0, 0, 0], [1, 1, 0], [0, 1, 0]]]
lowerCornerRight = [[[0, 1, 0], [1, 1, 0], [0, 0, 0]]]
lowerCornerLeft = [[[0, 1, 0], [0, 1, 1], [0, 0, 0]]]
self.spriteDict = [
[up, spriteIndexes[0]],
[down, spriteIndexes[1]],
[left, spriteIndexes[2]],
[right, spriteIndexes[3]],
[upperCornerLeft, spriteIndexes[4]],
[upperCornerRight, spriteIndexes[5]],
[lowerCornerLeft, spriteIndexes[6]],
[lowerCornerRight, spriteIndexes[7]],
]
leftUpperCorner = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
rightUpperCorner = [[0, 0, 0], [0, 1, 0], [1, 0, 0]]
rightLowerCorner = [[1, 0, 0], [0, 1, 0], [0, 0, 0]]
leftLowerCorner = [[0, 0, 1], [0, 1, 0], [0, 0, 0]]
self.cornerDict = [
[leftUpperCorner, spriteIndexes[-4]],
[rightUpperCorner, spriteIndexes[-3]],
[rightLowerCorner, spriteIndexes[-2]],
[leftLowerCorner, spriteIndexes[-1]],
]
```
#### File: game/levelGenerator/riverSkeletonGenerator.py
```python
from copy import deepcopy
from collections import deque
import random
class RiverSkeletonGenerator:
def __init__(self, matrix, WATER_SPRITE_INDEX, GROUND_SPRITE_INDEX):
self.matrix = []
self.waterSpot = []
groundSpots = []
for row in range(len(matrix)):
for column in range(len(matrix)):
if (
matrix[row][column] == GROUND_SPRITE_INDEX
and [row, column] not in groundSpots
):
currentArea = []
queue = deque()
queue.append([row, column])
self.BFS(deepcopy(matrix), GROUND_SPRITE_INDEX, currentArea, queue)
groundPercentage = (len(currentArea) * 100) / (
len(matrix) * len(matrix[0])
)
if 1 <= groundPercentage:
groundSpots.append(currentArea)
if len(groundSpots) > 0:
self.waterSpot = groundSpots[random.randint(0, len(groundSpots) - 1)]
self.createRiver(
matrix, self.waterSpot, WATER_SPRITE_INDEX, GROUND_SPRITE_INDEX
)
def BFS(self, matrix, GROUND_SPRITE_INDEX, currentArea, queue):
while len(queue) > 0:
row, column = queue.pop()
currentArea.append([row, column])
self.BFSHelper(
matrix, row - 1, column, GROUND_SPRITE_INDEX, currentArea, queue
)
self.BFSHelper(
matrix, row + 1, column, GROUND_SPRITE_INDEX, currentArea, queue
)
self.BFSHelper(
matrix, row, column - 1, GROUND_SPRITE_INDEX, currentArea, queue
)
self.BFSHelper(
matrix, row, column + 1, GROUND_SPRITE_INDEX, currentArea, queue
)
def BFSHelper(self, matrix, row, column, GROUND_SPRITE_INDEX, currentArea, queue):
if (
0 <= row < len(matrix)
and 0 <= column < len(matrix[0])
and matrix[row][column] == GROUND_SPRITE_INDEX
):
matrix[row][column] = -1
queue.append([row, column])
def createRiver(self, matrix, waterSpot, WATER_SPRITE_INDEX, GROUND_SPRITE_INDEX):
for point in waterSpot:
row, column = point
if matrix[row][column] == GROUND_SPRITE_INDEX:
matrix[row][column] = WATER_SPRITE_INDEX
```
#### File: game/levelGenerator/waterExtraDecorator.py
```python
import random
class WaterExtraDecorator:
def __init__(self, matrix, waterSpot, WATER_EXTRA_SPRITE_MAP):
for point in waterSpot:
row, column = point
if random.uniform(0, 100) < 0.4:
matrix[row][column] = WATER_EXTRA_SPRITE_MAP[
random.randint(0, len(WATER_EXTRA_SPRITE_MAP) - 1)
]
``` |
{
"source": "jobel-code/python-arango",
"score": 2
} |
#### File: python-arango/arango/wal.py
```python
from __future__ import absolute_import, unicode_literals
__all__ = ['WAL']
from arango.api import APIWrapper
from arango.exceptions import (
WALFlushError,
WALPropertiesError,
WALConfigureError,
WALTransactionListError
)
from arango.request import Request
class WAL(APIWrapper): # pragma: no cover
"""WAL (Write-Ahead Log) API wrapper.
:param connection: HTTP connection.
:type connection: arango.connection.Connection
:param executor: API executor.
:type executor: arango.executor.Executor
"""
def __init__(self, connection, executor):
super(WAL, self).__init__(connection, executor)
# noinspection PyMethodMayBeStatic
def _format_properties(self, body):
"""Format WAL properties.
:param body: Response body.
:type body: dict
:return: Formatted body.
:rtype: dict
"""
if 'allowOversizeEntries' in body:
body['oversized_ops'] = body.pop('allowOversizeEntries')
if 'logfileSize' in body:
body['log_size'] = body.pop('logfileSize')
if 'historicLogfiles' in body:
body['historic_logs'] = body.pop('historicLogfiles')
if 'reserveLogfiles' in body:
body['reserve_logs'] = body.pop('reserveLogfiles')
if 'syncInterval' in body:
body['sync_interval'] = body.pop('syncInterval')
if 'throttleWait' in body:
body['throttle_wait'] = body.pop('throttleWait')
if 'throttleWhenPending' in body:
body['throttle_limit'] = body.pop('throttleWhenPending')
return body
def properties(self):
"""Return WAL properties.
:return: WAL properties.
:rtype: dict
:raise arango.exceptions.WALPropertiesError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/_admin/wal/properties'
)
def response_handler(resp):
if not resp.is_success:
raise WALPropertiesError(resp, request)
return self._format_properties(resp.body)
return self._execute(request, response_handler)
def configure(self,
oversized_ops=None,
log_size=None,
historic_logs=None,
reserve_logs=None,
throttle_wait=None,
throttle_limit=None):
"""Configure WAL properties.
:param oversized_ops: If set to True, operations bigger than a single
log file are allowed to be executed and stored.
:type oversized_ops: bool
:param log_size: Size of each write-ahead log file in bytes.
:type log_size: int
:param historic_logs: Max number of historic log files to keep.
:type historic_logs: int
:param reserve_logs: Max number of reserve log files to allocate.
:type reserve_logs: int
:param throttle_wait: Wait time before aborting when write-throttled
in milliseconds.
:type throttle_wait: int
:param throttle_limit: Number of pending garbage collector operations
that, when reached, activates write-throttling. Value of 0 means
no throttling is triggered.
:type throttle_limit: int
:return: New WAL properties.
:rtype: dict
:raise arango.exceptions.WALConfigureError: If operation fails.
"""
data = {}
if oversized_ops is not None:
data['allowOversizeEntries'] = oversized_ops
if log_size is not None:
data['logfileSize'] = log_size
if historic_logs is not None:
data['historicLogfiles'] = historic_logs
if reserve_logs is not None:
data['reserveLogfiles'] = reserve_logs
if throttle_wait is not None:
data['throttleWait'] = throttle_wait
if throttle_limit is not None:
data['throttleWhenPending'] = throttle_limit
request = Request(
method='put',
endpoint='/_admin/wal/properties',
data=data
)
def response_handler(resp):
if not resp.is_success:
raise WALConfigureError(resp, request)
return self._format_properties(resp.body)
return self._execute(request, response_handler)
def transactions(self):
"""Return details on currently running WAL transactions.
Fields in the returned details are as follows:
.. code-block:: none
"last_collected" : ID of the last collected log file (at the
start of each running transaction) or None
if no transactions are running.
"last_sealed" : ID of the last sealed log file (at the start
of each running transaction) or None if no
transactions are running.
"count" : Number of currently running transactions.
:return: Details on currently running WAL transactions.
:rtype: dict
:raise arango.exceptions.WALTransactionListError: If retrieval fails.
"""
request = Request(
method='get',
endpoint='/_admin/wal/transactions'
)
def response_handler(resp):
if not resp.is_success:
raise WALTransactionListError(resp, request)
if 'minLastCollected' in resp.body:
resp.body['last_collected'] = resp.body.pop('minLastCollected')
if 'minLastSealed' in resp.body:
resp.body['last_sealed'] = resp.body.pop('minLastSealed')
if 'runningTransactions' in resp.body:
resp.body['count'] = resp.body.pop('runningTransactions')
return resp.body
return self._execute(request, response_handler)
def flush(self, sync=True, garbage_collect=True):
"""Synchronize WAL to disk.
:param sync: Block until the synchronization is complete.
:type sync: bool
:param garbage_collect: Block until flushed data is garbage collected.
:type garbage_collect: bool
:return: True if WAL was flushed successfully.
:rtype: bool
:raise arango.exceptions.WALFlushError: If flush operation fails.
"""
request = Request(
method='put',
endpoint='/_admin/wal/flush',
params={
'waitForSync': sync,
'waitForCollector': garbage_collect
}
)
def response_handler(resp):
if not resp.is_success:
raise WALFlushError(resp, request)
return True
return self._execute(request, response_handler)
``` |
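A sketch of reaching this wrapper through the regular client; host, database name and credentials are placeholders, and a running ArangoDB instance is assumed.

```python
# Placeholders throughout: host, database name and credentials.
from arango import ArangoClient

client = ArangoClient(hosts="http://127.0.0.1:8529")
db = client.db("_system", username="root", password="passwd")

print(db.wal.properties())      # e.g. {"oversized_ops": ..., "log_size": ..., ...}
db.wal.flush(sync=True, garbage_collect=True)
print(db.wal.transactions())    # {"last_collected": ..., "last_sealed": ..., "count": ...}
```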
{
"source": "Jobenland/MFC-WebApp",
"score": 2
} |
#### File: app/api/api_routes.py
```python
from flask import jsonify
from flask_login import login_required
from . import api
from ..utils import timestamp
@api.route('/now')
@login_required
def get_current_time():
""" Get Current Time """
return jsonify(results='success', time=timestamp())
``` |
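A hypothetical test-client call; `create_app` and the `/api` URL prefix are assumptions based on the blueprint name and are not confirmed by this excerpt.

```python
# Hypothetical: create_app and the "/api" prefix are assumptions, not shown in this repo excerpt.
from app import create_app

app = create_app()
with app.test_client() as client:
    # login_required rejects anonymous requests; an authenticated session
    # would receive {"results": "success", "time": <timestamp()>}.
    response = client.get("/api/now")
    print(response.status_code)
```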
{
"source": "Joberman/process_sat",
"score": 3
} |
#### File: process_sat/process_sat/out_geo.py
```python
import sys
from itertools import izip
import datetime
import warnings
import pdb
import numpy
import netCDF4
import utils
def vsnmsg(version):
return "This file was generated using WHIPS v{0}".format(version)
def ValidOutfuncs():
'''Return a list of valid output function names'''
currentModule = sys.modules[__name__]
names = dir(currentModule)
return [el[:-9] for el in names if el.endswith("_out_func")]
class out_func:
'''Abstract class to for <>_out_geo classes'''
def __init__(self, parmDict=None):
self.parmDict = parmDict
def __call__(self, map_geo, griddef, outfilenames, verbose, version):
raise NotImplementedError
@staticmethod
def parm_list():
raise NotImplementedError
@staticmethod
def required_parms():
raise NotImplementedError
def _OMNO2e_formula(cloudFrac, fieldOfView):
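    # Returns (eps * capE) ** -2, so the weight shrinks for cloudier pixels
    # (eps grows with cloudFrac) and for pixels far from the swath centre
    # (capE grows with |fieldOfView - 29.7|, the cross-track index offset).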
eps = 1.5*pow(10,15)*(1+3*cloudFrac)
capE = pow(10,-16)*(18.5+2.8*pow(10,-4)*pow(abs(fieldOfView-29.7), 3.5))
return pow((eps*capE), -2)
class invalidPixCeption(Exception):
pass
tai93conv = lambda(timestring):utils.timestr_to_nsecs(timestring,
'00:00:00_01-01-1993', '%H:%M:%S_%m-%d-%Y')
def boolCaster(boolStr):
if boolStr == 'True':
return True
elif boolStr == 'False':
return False
else:
msg = 'Attempt to cast invalid string %s to boolean' % boolStr
raise TypeError(msg)
# currently borked. No immediate plans to fix
#class OMNO2e_wght_avg_out_func(out_func):
class OMNO2e_wght_avg_BORKED(out_func):
'''
Weighted avg based on OMNO2e algorithm
Note: this algorithm doesn't note what to do when the weight for a term is
nonzero, but the term itself contains a fillValue. This assumption
is checked by an assert statement in the code, so it won't be checked
if optimization is requested
OMNO2e algorithm and theoretical basis laid out
at <http://disc.sci.gsfc.nasa.gov/Aura/data-holdings/OMI/omno2e_v003.shtml>
Set up to work specifically for OMI instruments.
parameters dict must contain keys:
toAvg
overallQualFlag
cloudFrac
solarZenithAngle
cloudFractUpperCutoff
solarZenAngUpperCutoff
pixIndXtrackAxis
fillVal
writes out a single file of name outfilename
when called. That file is an ASCII representation
weighted averages for each gridcell. It is a csv
file with all numbers in e-format scientific
notation. Cells without valid measurements contain the fillvalue.
'''
@staticmethod
def parm_list():
return ['toAvg', 'overallQualFlag', 'cloudFrac',
'solarZenithAngle', 'cloudFractUpperCutoff',
'pixIndXtrackAxis', 'fillVal']
@staticmethod
def required_parms():
return {'toAvg' : ('The name of the field to be averaged',None),
'overallQualFlag' : ('The name of the field containing' \
' the overall quality flag for the' \
' pixels. This flag should be true' \
' (1) for invalid pixels and false' \
' (0) for valid pixels.\n{ OMI KNMI' \
' - TroposphericColumnFlag\n OMI NASA' \
' - vcdQualityFlags }',None),
'cloudFrac' : ('The name of the field containing the ' \
'cloud fractions.\n{ OMI KNMI - CloudFraction' \
'\n OMI NASA - CloudFraction }',None),
'solarZenithAngle' : ('The name of the field containing the ' \
'solar zenith angles in degrees.\n{ ' \
'OMI KNMI - SolarZenithAngle\n OMI' \
' NASA - SolarZenithAngle }',None),
'cloudFractUpperCutoff' : ('The maximum cloud fraction to ' \
'allow before excluding pixel ' \
'from average. Suggested value ' \
'from NASA is 0.3','decimal'),
'solarZenAngUpperCutoff' : ('The maximum solar zenith angle ' \
'to allow before excluding pixel ' \
'from average. Suggested value ' \
'from NASA is 85. Must be in ' \
'degrees.','decimal'),
'pixIndXtrackAxis' : ('The dimension order (0 based) of the ' \
'of the "cross-track" dimension (which' \
'ever dimension has size 60). For all' \
' currently known cases should be 1 ' \
' (may change in future versions of ' \
'OMI products).','int'),
'fillVal' : ('The value to use as a fill value in the output ' \
'netCDF file. This value will replace any missing '\
' or invalid output values','decimal')}
# userKeys not necessary, so 'filler' field used instead
__userKeys__ = "filetype"
def __call__(self, maps, griddef, outfilename, verbose, version):
# function is broken until it can be refactored such that
# _OMNO2e_func doesn't require totFlag. Needs to have pixel
# loop to check each
BORKED = 2
print('THIS FUNCTION IS BORKED. ABORT! ABORT! ABORT!')
sys.exit(BORKED)
# even though IO interface handles casting already
# a catchblock has been added here for safety
# in case someone wants to use this class directly
castDict = {'toAvg':str, 'overallQualFlag':str,
'cloudFrac':str, 'solarZenithAngle':str,
'cloudFractUpperCutoff':float,
'solarZenAngUpperCutoff':float,
'pixIndXtrackAxis':int, 'fillVal':float}
for (k,func) in castDict.items():
self.parmDict[k] = func(self.parmDict[k])
'''Write out single weighted-avg file'''
numpy.seterr(over='raise')
nRows = griddef.indLims()[1] - griddef.indLims()[0] + 1
nCols = griddef.indLims()[3] - griddef.indLims()[2] + 1
sum_weights = numpy.zeros((nRows, nCols))
sum_weighted_vals = numpy.zeros((nRows, nCols))
if not isinstance(maps, list):
maps = [maps] # create list if we didn't get one
for map in maps:
with map.pop('parser') as p: # pop out so we can loop
if verbose:
print('Processing {0} for output at {1}.'.format(\
p.name, str(datetime.datetime.now())))
for (k,v) in map.iteritems():
sumFlag = numpy.array([p.get_cm(self.parmDict['overallQualFlag'], pxind)
for (pxind, unused_weight) in v])
sumFlag = numpy.mod(sumFlag, 2)
cFrac = numpy.array([p.get_cm(self.parmDict['cloudFrac'], pxind)
for (pxind, unused_weight) in v])
cFracFlag = cFrac > self.parmDict['cloudFractUpperCutoff']
solZen = numpy.array([p.get_cm(self.parmDict['solarZenithAngle'], pxind)
for (pxind, unused_weight) in v])
solZenFlag = solZen > self.parmDict['solarZenAngUpperCutoff']
totFlag = numpy.logical_or(numpy.logical_or(sumFlag, cFracFlag), solZenFlag)
fov = numpy.array([pxind[self.parmDict['pixIndXtrackAxis']]
for (pxind, unused_weight) in v])
toAvg = numpy.array([p.get_cm(self.parmDict['toAvg'], pxind)
for (pxind, unused_weight) in v])
# BORKED
weights = 0
# weights = _OMNO2e_formula(cFrac, fov)
assert ~any(numpy.logical_and(~numpy.isnan(weights), numpy.isnan(toAvg)))
sumWeight = numpy.nansum(weights)
sumWeightVals = numpy.nansum(toAvg*weights)
# only add if we had some element (otherwise we fill
# sum_weights with nans)
if ~numpy.isnan(sumWeight) and ~numpy.isnan(sumWeightVals):
sum_weights[k] += sumWeight
sum_weighted_vals[k] += sumWeightVals
map['parser'] = p # return parser to map
oldsettings = numpy.seterr(divide='ignore') # ignore any div by zero errors
avgs = numpy.where(sum_weights != 0,
numpy.divide(sum_weighted_vals, sum_weights),
self.parmDict['fillVal'])
numpy.seterr(divide=oldsettings['divide']) # reset to default
numpy.savetxt(outfilename, avgs, delimiter=',', fmt='%7e')
return avgs
class OMNO2e_netCDF_avg_out_func(out_func):
'''
Weighted average for a given set of filtered values
based on OMNO2e algorithm.
Assumptions:
- this algorithm assumes that fields to be averaged will have,
at most, 1 extra dimension. If not, an assertion error is raised.
- this algorithm is undefined for cases where the weight of a term
is nonzero, but the term contains a fillValue. If this condition
is met, unexpected results may occur. This assumption is NOT checked
- The timestamp of the file is assumed to be in the TAI93 standard.
OMNO2e algorithm and theoretical basis laid out
at <http://disc.sci.gsfc.nasa.gov/Aura/data-holdings/OMI/omno2e_v003.shtml>
Set up to work specifically for OMI instruments.
parameters dict must contain keys:
overallQualFlag:
Flag used as overall quality. Assumed that
when this flag is set, the data is BAD and
the pixel is ignored
cloudFrac:
field with cloud fraction (0 to 1). When this
field is GREATER than the cutoff value the
pixel is ignored.
solarZenithAngle:
Field with solar zenith angle (in degrees).
When this field is GREATER than the cutoff
value the pixel is ignored.
time:
Field with the timestamps for each pixel.
Assumed to be in TAI-93 format. When
this field is less than the timeStart
parameter or greater than the timeStop
parameter the pixel is ignored.
longitude:
Field with the longitudes at cell centers.
Used to estimate timezones of the pixels if
'local' is selected for timeComparison. Not
used when timeComparison is 'UTC'
inFieldNames:
List of fields to process. Each of these
is output as a separate variable in the
netcdf output file.
outFieldNames:
List of desired output names. Must be of the
same length and co-indexed to the list above.
These will be the actual variable names in
the netcdf file.
outUnits:
List of string labels for the units of each
output quantity. Must be of the same length
and co-indexed to the lists above.
extraDimLabel:
List of the labels for the above extra
dimensions. 1 per variable. Only used if the
coindexed field has an extra dim. Must be of the
same length and co-indexed to the lists above.
extraDimSize:
List of the sizes of the above extra dimensions.
1 per variable. If the coindexed field does not
have an extra dim, put in 0 or 1. Must be
of the same length and co-indexed to the lists
above.
timeComparison:
Determines how the timeStart and timeStop
arguments are interpreted. If the user selects
'local', these arguments are interpreted as local
times. Only those pixels whose timestamps
indicate they lie in the desired span in local
time will be included. Daylight savings time
is not considered and time zone calculations are
            only approximate. If the user selects 'UTC'
a straight comparison is done between the pixel
timestamp and the timeStart and timeStop
arguments to see if the pixel is valid.
timeStart:
Initial time we want included in file.
Times must be in TAI93 standard format.
*format hh:mm:ss_MM-DD-YYYY will also be converted automatically.
timeStop:
            Final time we want included in the file.
Times must be in TAI93 standard format.
*format hh:mm:ss_MM-DD-YYYY will also be converted automatically.
cloudFractUpperCutoff:
Pixels with a higher cloud fraction than this
value will be ignored.
solarZenAngUpperCutoff:
Pixels with a higher solar zenith angle than
this value will be ignored.
pixIndXtrackAxis:
The axis (IE which dimension in memory order)
that specifies the pixels cross-track position.
This way, the cross-track index number can
be retrieved safely and used in the weighting
function.
fillVal:
The value we want to use to denote missing data
in the output file. This will be documented
within the output file itself.
includePixelCount:
If this parameter is True, WHIPS will include a field
'ValidPixelCount' in the output file that will include
the number of valid pixels for each grid cell.
Outputs a netcdf file with name determined by outFileName
parameter. This netcdf file contains as many variables
as there are inFieldNames passed. Each variable
is output as an average over the range of values
    where it was valid according to the averaging
    scheme defined in the NASA document linked above.
'''
@staticmethod
def parm_list():
return ['overallQualFlag', 'cloudFrac', 'solarZenithAngle',
'time', 'longitude', 'inFieldNames', 'outFieldNames',
'outUnits', 'extraDimLabel', 'extraDimSize',
'timeComparison', 'timeStart', 'timeStop',
'cloudFractUpperCutoff', 'solarZenAngUpperCutoff',
'pixIndXtrackAxis', 'fillVal', 'includePixelCount']
@staticmethod
def required_parms():
return {'overallQualFlag' : ('The name of the field containing ' \
'the overall quality flag for the ' \
'pixels. This flag should be true (1) ' \
'for invalid pixels and false (0) for ' \
'valid pixels.\n{ OMI KNMI - Tropo' \
'sphericColumnFlag\n OMI NASA - vcd' \
'QualityFlags }',None),
'cloudFrac' : ('The name of the field containing the ' \
'cloud fractions\n{ OMI KNMI - CloudFraction' \
'\n OMI NASA - CloudFraction }',None),
'solarZenithAngle' : ('The name of the field containing the '\
'solar zenith angles in degrees.\n{ ' \
'OMI KNMI - SolarZenithAngle\n OMI ' \
'NASA - SolarZenithAngle }',None),
'time' : ('The name of the field containing the timestamps. '\
' Timestamps are assumed to be the in TAI-93 ' \
'format.\n{ OMI KNMI - Time\n OMI NASA - TIME }', \
None),
'longitude' : ('The name of the field containing longitudes ' \
'at cell centers. Longitudes should be in ' \
'degrees east.\n{ OMI KNMI - Longitude\n ' \
'OMI NASA - Longitude }',None),
'inFieldNames' : ('The names of the fields desired to be ' \
'output. Input as comma-delimited list ', \
'list'),
'outFieldNames' : ('The names of the output variables (even ' \
'if they are to be the same as the input ' \
'variables). Should be a comma-delimited ' \
'list co-indexed to inFieldNames','list'),
'outUnits' : ('The units of the variables to be written out. ' \
'Should be a comma-delimited list co-indexed ' \
'to inFieldNames','list'),
'extraDimLabel' : ('The label for the extra dimension ' \
'(should the variable have an extra ' \
'dimension). Ignored in the case of a ' \
'2D variable. Should be a comma-delimited '\
'list co-indexed to inFieldNames','list'),
'extraDimSize' : ('The size of the extra dimensions (should ' \
'the variable have an extra dimension). ' \
'For 2D variables, must be set to 0. (zero)' \
' Should be a comma-delimited list ' \
'co-indexed to inFieldNames.','list'),
'timeComparison' : ('Must be set to either "local" or "UTC". ' \
' Determines how the file timestamps are ' \
'compared to the start/stop time. If set '\
'to "local", then the file timestamps are '\
'converted to local time on a pixel-by-'\
'pixel basis (using longitude to estimate '\
'time zone) before being compared to time '\
'boundaries. If set to "UTC" the file ' \
'timestamps (which are assumed to be in ' \
'UTC) are compared against the start/stop '\
'time directly.',None),
'timeStart' : ('The earliest time for which data should be ' \
'recorded into the output file. All times in ' \
'input files before this time will be filtered '\
'out. Must be in the format hh:mm:ss_MM-DD-' \
'YYYY','time'),
'timeStop' : ('The latest time for which data should be ' \
'recorded into the output files. All times in '\
'input files after this time will be filtered ' \
'out. Must be in the format hh:mm:ss_MM-DD-' \
'YYYY','time'),
'cloudFractUpperCutoff' : ('The maximum cloud fraction to ' \
'allow before excluding pixel from '\
'average. Suggested value from ' \
'NASA is 0.3','decimal'),
'solarZenAngUpperCutoff' : ('The maximum solar zenith angle to'\
' allow before excluding pixel ' \
'from average, in degrees. ' \
'Suggested value from NASA is 85.',\
'int'),
'pixIndXtrackAxis' : ('The dimension order (0 based) of the ' \
'"cross-track" dimension (whichever ' \
'dimension has size 60). For all ' \
'currently known cases set equal to 1 ' \
'(depends on the construction of the ' \
'parser function. If you rewrite the ' \
'parser, check this).','int'),
'fillVal' : ('The value to use as a fill value in the output ' \
'netCDF file. This value will replace any missing '\
'or invalid output values','decimal'),
'includePixelCount' : ('If set to true, the output will include '\
'a field "ValidPixelCount" that contains '\
'the number of valid pixels in each grid '\
'cell. Only pixels with nonzero weight '\
'are considered valid.', 'bool')}
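# For reference, a hypothetical set of values for the list- and time-typed
# parameters described above (field names invented purely for illustration):
#   inFieldNames  = "ColumnAmountNO2Trop,CloudFraction"
#   outFieldNames = "tropColumnNO2,cloudFraction"
#   outUnits      = "molec/cm^2,unitless"
#   extraDimLabel = "none,none"
#   extraDimSize  = "0,0"
#   timeStart     = "00:00:00_01-01-2006"
#   timeStop      = "23:59:59_01-31-2006"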
# variable signifying which list is to act as the master list index
__userKeys__ = "inFieldNames"
def __call__(self, maps, griddef, outfilename, verbose, version):
'''Write out a weighted-average file in netcdf format.'''
#Make sure non-string parameters are in the correct format
dimsizes = self.parmDict['extraDimSize']
for i in range(len(dimsizes)):
try:
dimsizes[i] = int(dimsizes[i])
except ValueError:
print ("Warning: {0} is not a valid extraDimSize value. " \
"Using 0 instead").format(dimsizes[i])
dimsizes[i] = 0
continue
self.parmDict['extraDimSize'] = dimsizes
# even though IO interface handles casting already,
# a catchblock has been added here for safety
# in case someone wants to use this class directly
castDict = {'overallQualFlag':str, 'cloudFrac':str,
'solarZenithAngle':str, 'time':str,
'longitude':str, 'inFieldNames':list,
'outFieldNames':list, 'outUnits':list,
'extraDimLabel':list, 'extraDimSize':list,
'timeComparison':str, 'timeStart':tai93conv,
'timeStop':tai93conv, 'cloudFractUpperCutoff':float,
'solarZenAngUpperCutoff':int, 'pixIndXtrackAxis':int,
'fillVal':float, 'includePixelCount':boolCaster}
for (k,func) in castDict.items():
try:
self.parmDict[k] = func(self.parmDict[k])
except TypeError:
pass
#Perform some basic sanity checks with parameters
if self.parmDict['timeStart'] > self.parmDict['timeStop']:
msg = 'Input start time must come before stop time.'
raise IOError(msg)
if (len(self.parmDict['inFieldNames']) != \
len(self.parmDict['outFieldNames']) or
len(self.parmDict['inFieldNames']) != \
len(self.parmDict['outUnits']) or
len(self.parmDict['inFieldNames']) != \
len(self.parmDict['extraDimLabel'])):
msg = 'All field/unit inputs ' + \
'should have the same number of elements.'
raise IOError(msg)
# create numpy arrays to hold our data
(minRow, maxRow, minCol, maxCol) = griddef.indLims()
nRows = maxRow - minRow + 1
nCols = maxCol - minCol + 1
nValidPixels = numpy.zeros((nRows, nCols))
sumWght = numpy.zeros((nRows, nCols, 1)) # needs extra dim to generalize for 3D vars
sumVars = dict()
for field, size in zip(self.parmDict['inFieldNames'], self.parmDict['extraDimSize']):
if size:
sumVars[field] = numpy.zeros((nRows, nCols, size))
else:
# pad with a singlet dim if it was 2D
sumVars[field] = numpy.zeros((nRows, nCols, 1))
# loop over maps
if not isinstance(maps, list):
maps = [maps] # create list if we only got a single map
for map in maps:
# open up context manager
with map.pop('parser') as parser: # remove parser for looping
if verbose:
print('Processing {0} for output at {1}.'.format(\
parser.name, str(datetime.datetime.now())))
# loop over gridboxes in map and calculate weights
for (gridCell, pixTup) in map.iteritems():
# translate gridCell to account for possible non-zero ll corner
gridRow = gridCell[0]
gridCol = gridCell[1]
gridInd = (gridRow - minRow, gridCol - minCol)
# get the values needed to calculate weight
for (pxInd, unused_weight) in pixTup:
# check summary flag
sumFlag = parser.get_cm(self.parmDict['overallQualFlag'], pxInd)
if sumFlag % 2:
continue
# check cloud fraction flag
cFrac = parser.get_cm(self.parmDict['cloudFrac'], pxInd)
if not (cFrac <= self.parmDict['cloudFractUpperCutoff']):
continue
# check solar zenith angle flag
solZenAng = parser.get_cm(self.parmDict['solarZenithAngle'], pxInd)
if solZenAng > self.parmDict['solarZenAngUpperCutoff']:
continue
# check time flag
time = parser.get_cm(self.parmDict['time'], pxInd)
# calculate and factor in offset if the user wanted us to
if self.parmDict['timeComparison'] == 'local':
pixLon = parser.get_cm(self.parmDict['longitude'], pxInd)
offset = utils.UTCoffset_from_lon(pixLon)
time += offset
if time < self.parmDict['timeStart'] or time > self.parmDict['timeStop']:
continue
# read in all the data, abandon ship if data is all NaN
rawDataDict = {}
try:
for field in self.parmDict['inFieldNames']:
rawData = parser.get_cm(field, pxInd)
if numpy.isnan(rawData).all():
raise invalidPixCeption
rawDataDict[field] = rawData
except invalidPixCeption:
continue
# compute the weight
fov = pxInd[self.parmDict['pixIndXtrackAxis']]
weight = _OMNO2e_formula(cFrac, fov)
assert not numpy.isnan(weight)
if weight > 0:
nValidPixels[gridInd] += 1
# add the weight to the total for this cell
sumWght[gridInd] += weight
for field in self.parmDict['inFieldNames']:
weightVals = rawDataDict[field] * weight
if weightVals.size > 1:
sumVars[field][gridInd] = numpy.nansum([sumVars[field][gridInd], weightVals], axis=0)
else:
sumVars[field][gridInd] = numpy.nansum([sumVars[field][gridInd][0], weightVals])
map['parser'] = parser # return parser to map
# divide out variables by weights to get avgs.
oldSettings = numpy.seterr(divide='ignore')
avgs = dict()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for (field,var) in sumVars.iteritems():
unfiltAvgs = var/sumWght
filtAvgs = numpy.where(sumWght != 0, unfiltAvgs, \
self.parmDict['fillVal'])
# strip trailing singlet for 2D arrays
if filtAvgs.shape[-1] == 1:
avgs[field] = filtAvgs.reshape(filtAvgs.shape[0:2])
else:
avgs[field] = filtAvgs
numpy.seterr(divide=oldSettings['divide'])
# associate coindexed parameters into dicts
# so we can loop by field
outFnames = dict(izip(self.parmDict['inFieldNames'], self.parmDict['outFieldNames']))
units = dict(izip(self.parmDict['inFieldNames'], self.parmDict['outUnits']))
extraDim = dict(izip(self.parmDict['inFieldNames'], self.parmDict['extraDimLabel']))
# write out results to a netcdf file
outFid = netCDF4.Dataset(outfilename, 'w', format='NETCDF3_CLASSIC')
# create the 2 dimensions all files use
outFid.createDimension('row', nRows)
outFid.createDimension('col', nCols)
# write global attributes
setattr(outFid, 'Version', vsnmsg(version))
setattr(outFid, 'File_start_time', utils.nsecs_to_timestr(self.parmDict['timeStart'], '00:00:00 01-01-1993'))
setattr(outFid, 'File_end_time', utils.nsecs_to_timestr(self.parmDict['timeStop'], '00:00:00 01-01-1993'))
setattr(outFid, 'Max_valid_cloud_fraction', self.parmDict['cloudFractUpperCutoff'])
setattr(outFid, 'Max_valid_solar_zenith_angle', self.parmDict['solarZenAngUpperCutoff'])
setattr(outFid, 'Time_comparison_scheme', self.parmDict['timeComparison'])
fileListStr = ' '.join([map['parser'].name for map in maps])
setattr(outFid, 'Input_files', fileListStr)
setattr(outFid, 'Projection', griddef.__class__.__name__[:-8])
for (k,v) in griddef.parms.iteritems():
setattr(outFid, k, v)
# loop over fields and write variables
for field in self.parmDict['inFieldNames']:
# create tuple of dimensions, defining new dim
# if necessary
if len(avgs[field].shape) == 2:
# only row/cols
varDims = ('row', 'col')
elif len(avgs[field].shape) == 3:
# has extra dim
dimName = extraDim[field]
dimSize = avgs[field].shape[2]
if dimName not in outFid.dimensions.keys():
outFid.createDimension(dimName, dimSize)
varDims = ('row', 'col', dimName)
# create and assign value to variable
varHandle = outFid.createVariable(outFnames[field], 'd', varDims, fill_value=self.parmDict['fillVal'])
varHandle[:] = avgs[field]
# assign variable attributes
setattr(varHandle, 'Units', units[field])
# Write out the pixel counts if the user requested them
if self.parmDict['includePixelCount']:
varDims = ('row', 'col')
varHandle = outFid.createVariable('ValidPixelCount', 'i', varDims,
fill_value=self.parmDict['fillVal'])
varHandle[:] = nValidPixels
outFid.close()
# create a dict with the same data as avgs, but different names
outAvg = dict()
for (k,v) in avgs.iteritems():
outAvg[outFnames[k]] = v
if self.parmDict['includePixelCount']:
outAvg['ValidPixelCount'] = nValidPixels
return outAvg
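# -----------------------------------------------------------------------------
# Illustrative sketch only (not used by any class in this module): the per-cell
# bookkeeping in the __call__ method above reduces to an accumulate-then-divide
# weighted mean, with cells that received zero total weight set to a fill
# value. The helper below shows that pattern in isolation for a single cell and
# also mirrors the optional lognormal variant documented in wght_avg_netCDF
# below. It assumes the module-level numpy import used throughout this file.
def _weighted_cell_average_sketch(values, weights, fillVal=-9999.0, logNormal=False):
    '''Illustrative only: (optionally lognormal) weighted mean of 1D arrays.'''
    values = numpy.asarray(values, dtype=float)
    weights = numpy.asarray(weights, dtype=float)
    if logNormal:
        values = numpy.log(values)
    wghtSum = numpy.nansum(weights)
    if wghtSum == 0:
        return fillVal
    avg = numpy.nansum(values * weights) / wghtSum
    return numpy.exp(avg) if logNormal else avg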
class wght_avg_netCDF(out_func):
'''
Generalized weighted average algorithm
Designed to compute the average of an arbitrary number of desired
parameters, with the value weights based on an arbitrary number of input
parameters. Note that values may be weighted according to their own value.
The output will be in the form of a netcdf file with name determined by the
outFileName parameter. This netCDF file will have dimensions determined by
the grid_geo file, as well as additional dimensions as required by the
input fields.
Owing to the complexity of the inputs required for this function and the
security problems posed by allowing users to input functions to be
evaluated, this output function does not support the I/O interface at this
time. It is designed to be subclassed.
This function (and therefore subclasses of this function) at present can
only handle a single input map. It may be extended to properly handle
multiple input maps at some point in the future, but this is difficult
because the filter function is expected to apply to all pixels in the cell
(which would require looping over all the maps to find all the pixels)
but also requires a reference to the parser (which would require those
parsers be held open).
parmDict must contain the following keys:
time:
The field associated with the timestamps. Timestamps may be in any
format so long as a function is provided to convert them to Unix
timestamp values (as this is what the function will use internally)
longitude:
Field with the longitudes at cell centers. Used to estimate
timezones of the pixels if local is selected for timeComparison.
Not used when timeComparison is 'UTC'
inFieldNames:
List of strings corresponding to fields for which output is
desired. These must be valid keys for the parser. Each is output
as a separate variable in the netcdf output file.
outFieldNames:
List of strings corresponding to the names the output variables
should have in the final netCDF file. Must be of the same length
and co-indexed to the list above.
outUnits:
List of strings corresponding to the labels for the units of each
output variable. These will be attached as the "units" attribute
for each variable in the output netCDF file. Must be of the same
length and co-indexed to the lists above.
logNormal:
Vector indicating whether or not we want to take the
lognormal mean (as opposed to the simple, arithmetic mean). If
this parameter is set to "True", the mean will be taken as follows:
logData = ln(data)
logAvg = sum(logData*wghts)/sum(wghts)
avg = exp(logAvg)
whereas if this parameter is set to "False" the mean will be simply:
avg = sum(data*wghts)/sum(wghts)
To allow finer-grained control of the output, logNormal must be
set individually for each output field (as it may be appropriate
to use the log normal distribution only for select fields). Thus,
logNormal must be of the same length and co-indexed to the lists
above.
dimLabels:
List of tuple-like strings (delimited by periods with no whitespace),
each of which contains as many strings as there are
extra dimensions in the corresponding field. IE, if a field has
dimensions (xtrack, time, layer, quant) and we allocate along
xtrack and time, then the tuple-like string for that field should be
"(layer.quant)" so that the dimensions are named properly in the
output netCDF file. The list (not the individual tuples) must be
of the same length and co-indexed to the lists above. Note that
the dimensions looped over and allocated to cells in the map_geo
function DO NOT need to be represented here.
dimSizes:
List of tuple-like strings (delimited by periods with no whitespace),
each of which contains as many strings (castable to ints) as there
are extra dimensions in the corresponding field. IE if the field
described in dimLabels had dimensions (60, 1300, 9, 4) then the
tuple-like string for that field should be "(9.4)". The list (not the
individual tuples) must be of the same length and co-indexed to the
lists above. If the field has no extra dimensions, then an empty
tuple should be used as a placeholder. Note that the dimensions
looped over and allocated to cells in the map_geo function DO NOT
need to be represented here.
timeComparison:
Determines how the timeStart and timeStop parameters are
interpreted. Must be either 'local' or 'UTC'. If 'local' is
selected, only those pixels in the desired timespan in local
time will be included. Daylight savings time is not considered and
time zone calculations are approximate based on longitude. If
'UTC' is selected, the raw timestamps are compared directly to the
timeStart and timeStop parameters, without attempting to account
for timezone.
timeStart:
Initial time we want included in the output file. All measurements
taken before this time will be discarded, even if they are included
in the files passed to the output function. Must be a string of
the format hh:mm:ss_MM-DD-YYYY.
timeStop:
Final time to be included in the output file. All measurements
taken after this time will be discarded. Must be a string of the
format hh:mm:ss_MM-DD-YYYY.
timeConv:
Function that converts timeStart and timeStop (both of which are
strings of the format hh:mm:ss_MM-DD-YYYY) into the format used to
store time within the parser. IE if the parser returns time in
TAI93, then this function should convert a string
hh:mm:ss_MM-DD-YYYY to TAI93.
fillVal:
The value with which we want to denote missing data in the output
file. This value must be castable to the type of all output
variables. Each variable will document the fill value in its
attributes.
notes:
String to be included in the output attributes of the final file.
Use this to hold any extra information you'd like to be packaged with
the data.
weightFunction:
Function that computes the weight of a value. This can be as
simple as assigning a weight of 1 to that value (creating an
unweighted average) or using multiple fields to generate a weight
for each pixel. The function must be of the form:
weight = weightFunction(parser, index, prevWght)
where parser is the parser for the file under consideration, index
is the tuple of indices for the pixel under consideration, and
prevWght is the weight as computed by the mapping function. Note
that in authoring these functions it is safe to use both the get()
and get_cm() functions of the parser argument. The function may
return either 0 or NaN for pixels that should not be considered in
the average. Either will be handled appropriately. The docstring
for this function will be included as a global attribute in the
output file, so the docstring should be sufficient to describe the
function in its entirety.
filterFunction:
Function that looks at the entire stack of pixels for a cell and
selects any pixels that need to be filtered out. Note that for
efficiency reasons this should not be used to catch filter
conditions unique to each pixel. That sort of filter should
be applied in the weightFunction. This function is strictly
intended for operations that can only be performed once the entire
stack for a particular cell is available (IE if a value is checked
for each pixel in the cell and those pixels not matching the
majority value are rejected). The function must be of the form
flagVec = filterFunction(parser, indStack)
where indStack is an iterable of index tuples and parser is the
parser for the file under consideration. flagVec (the return
vector) should be boolean and true for those values that should
NOT be included in the final average. It should be the same length
as indStack. To reiterate: flagVec should be true for those values
that should be REMOVED, and false for those values to be kept.
The docstring of this function will be included as a global
attribute in the final output file, so the docstring should be
sufficient to describe the function in its entirety. Note that it
is safe to use both get and get_cm functions within this function -
it is guaranteed to be called within a context manager.
'''
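# For reference, hypothetical callables matching the weightFunction and
# filterFunction signatures described in the docstring above (shown as
# comments only so they are not mistaken for attributes of this class):
#
#   def unitWeight(parser, index, prevWght):
#       '''Give every pixel a weight of 1 (plain unweighted average).'''
#       return 1
#
#   def keepAllPixels(parser, indStack):
#       '''Reject nothing: return an all-False flag vector.'''
#       return [False] * len(indStack)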
def __init__(self, parmDict=None):
# call ancestor method
out_func.__init__(self, parmDict)
# check that all the lists are the same length
lists = ['outFieldNames', 'outUnits', 'dimLabels', 'dimSizes', 'logNormal']
canonicalLength = len(self.parmDict['inFieldNames'])
isCorrectLength = [len(self.parmDict[list]) == canonicalLength for list in lists]
if not all(isCorrectLength):
wrongLength = [name for (name, corr) in izip(lists,isCorrectLength) if not corr]
msg = "All lists must be the same length. The following list lengths do " \
"not match the length of inFieldNames: " + ' '.join(wrongLength)
raise ValueError(msg)
# process lists of tuple-like objects into lists of appropriately-typed tuples
labelTups = self.parmDict['dimLabels']
sizeTups = self.parmDict['dimSizes']
# confirm that each tuple is the same size
tupsMatch = [len(l) == len(s) for (l,s) in izip(labelTups, sizeTups)]
if not all(tupsMatch):
misMatch = [l + ' does not match ' + s for (l,s,m) in izip(labelTups,sizeTups,tupsMatch) if not m]
msg = "All tuple-like strings must correspond to tuples of corresponding size. " \
"The following sets do not correspond: \n" + '\n'.join(misMatch)
raise ValueError(msg)
# convert sizes to integers
try:
sizeIntTups = [[int(s) for s in strTup] for strTup in sizeTups]
except ValueError as err:
messageWords = err.message.split()
uncastable = messageWords[-1].strip("'")
msg = "The value %s in the dimSizes argument was not castable to " \
"an integer." % uncastable
raise ValueError(msg)
# put back into parameter dictionary
self.parmDict['dimLabels'] = labelTups
self.parmDict['dimSizes'] = sizeIntTups
# process logNormal
try:
self.parmDict['logNormal'] = [boolCaster(el) for el in self.parmDict['logNormal']]
except TypeError:
print('Bad string in logNormal. Must be either "True" or "False". Exiting.')
raise
# convert all the parameters co-indexed to inFieldNames to dictionaries
# keyed off of inFieldNames
inFnames = self.parmDict['inFieldNames']
for key in lists:
self.parmDict[key] = dict(zip(inFnames, self.parmDict[key]))
def __call__(self, maps, griddef, outfilename, verbose, version):
'''Write out a weighted-average file in netcdf format.'''
# create a dictionary of numpy arrays that will hold the data for all
# our variables, keyed to inFieldNames
(minRow, maxRow, minCol, maxCol) = griddef.indLims()
nRows = maxRow - minRow + 1
nCols = maxCol - minCol + 1
outputArrays = dict()
for field in self.parmDict['inFieldNames']:
dims = [nRows, nCols] + self.parmDict['dimSizes'][field]
outputArrays[field] = numpy.zeros(dims)
# prep for computing weights
wghtDict = dict() # we only want to compute each weight once
wghtFunc = self.parmDict['weightFunction']
filtFunc = self.parmDict['filterFunction']
# convert the times to the proper format
tConvFunc = self.parmDict['timeConv']
timeStart = tConvFunc(self.parmDict['timeStart'])
timeStop = tConvFunc(self.parmDict['timeStop'])
# loop over maps
if not isinstance(maps, list):
maps = [maps] # create list if we didn't get one
# check to see that we were given exactly one map. If we aren't
# explain that this function only works for one map and exit
if len(maps) != 1:
msg = "Though in general output functions are designed to handle" \
" multiple input files, this function currently can only " \
"process individual input files. Please only provide one " \
"input file or use a different output function. This " \
"limitation may be fixed if there is a convincing reason " \
"to rewrite the function to accomodate more inputs"
raise NotImplementedError(msg)
for map in maps:
with map.pop('parser') as p:
if verbose:
print('Processing %s for output at %s' %
(p.name, str(datetime.datetime.now())))
# loop over the cells in the map, processing each
for (cellInd, pixTups) in map.iteritems():
# compute the weight only if we haven't already. In either
# case, put the weights in array.
wghts = [wghtDict.setdefault(ind, wghtFunc(p, ind, wgt))
for (ind, wgt) in pixTups]
# create the time array we'll be using to filter
tArray = numpy.array([p.get_cm(self.parmDict['time'], ind)
for (ind, wgt) in pixTups]).squeeze()
if self.parmDict['timeComparison'] == 'local':
offsets = numpy.array([utils.UTCoffset_from_lon(
p.get_cm(self.parmDict['longitude'], ind))
for (ind, wgt) in pixTups]).squeeze()
tArray += offsets
tFlag = numpy.logical_or(tArray < timeStart, tArray > timeStop)
# use the filter function on the stack to apply user-defined
# filter conditions
pixIndStack = [pInd for (pInd, unused_weight) in pixTups]
uFlag = numpy.array(filtFunc(p, pixIndStack))
# combine time filter and user filter into a single, global flag
gFlag = numpy.logical_or(uFlag, tFlag)
# filter the weights so that values that will be rejected don't
# have their weights included in the denominator of the final
# average.
wghts = numpy.where(gFlag, numpy.NaN, wghts)
# loop over fields. For each, compute avg and save
for field in self.parmDict['inFieldNames']:
# create the array of weighted values
vals = numpy.array([p.get_cm(field, ind)
for (ind, wgt) in pixTups]).squeeze()
if self.parmDict['logNormal'][field]:
vals = numpy.log(vals) # work with logarithm of data
# create a slice object that will allow us to broadcast
# weights against the values
extraDims = self.parmDict['dimSizes'][field]
# handle the special case where we put in 0 for extradims
if len(extraDims) == 1 and extraDims == [0]:
nExtraDims = 0
else:
nExtraDims = len(extraDims)
wghtSlice = [Ellipsis]+[numpy.newaxis]*nExtraDims
# handle special case where there were no pixels in
# cell
if vals.size == 0:
vals = vals.reshape([0]+extraDims)
# compute weighted values
wghtVals = vals*wghts[wghtSlice]
# average the weighted Values
wghtValSum = numpy.nansum(wghtVals, axis=0)
wghtSum = numpy.nansum(wghts, axis=0)
# avoid hassle with div/0 warnings
if wghtSum != 0:
wghtValAvg = wghtValSum/wghtSum
else:
wghtValAvg = numpy.NaN
# re-exponentiate if we took log average
if self.parmDict['logNormal'][field]:
wghtValAvg = numpy.exp(wghtValAvg)
# mask nan's with fillVal, then slot into output array
wghtValAvg = numpy.where(numpy.isnan(wghtValAvg),
self.parmDict['fillVal'],
wghtValAvg)
outputArrays[field][cellInd] = wghtValAvg
# done looping over fields
# done looping over cells
# done with context manager on parser
# return the parser to the map so it can be used elsewhere
map['parser'] = p
if verbose:
print('Done processing %s at %s' %
(p.name, str(datetime.datetime.now())))
# done looping over maps
# set up the parts of the netcdf file that AREN'T field specific
outFid = netCDF4.Dataset(outfilename, 'w', format='NETCDF3_CLASSIC')
outFid.createDimension('row', nRows)
outFid.createDimension('col', nCols)
# set global attributes
setattr(outFid, 'Version', vsnmsg(version))
setattr(outFid, 'File_start_time',
utils.nsecs_to_timestr(self.parmDict['timeStart'],
epoch='00:00:00 01-01-1993',
format='%H:%M:%S %m-%d-%Y'))
setattr(outFid, 'File_stop_time',
utils.nsecs_to_timestr(self.parmDict['timeStop'],
epoch='00:00:00 01-01-1993',
format='%H:%M:%S %m-%d-%Y'))
setattr(outFid, 'Time_comparison_scheme', self.parmDict['timeComparison'])
flistStr = ' '.join([map['parser'].name for map in maps])
setattr(outFid, 'Input_files', flistStr)
setattr(outFid, 'Weighting_function_description', wghtFunc.__doc__)
setattr(outFid, 'Filter_function_description', filtFunc.__doc__)
# add in attributes for the projection
setattr(outFid, 'Projection', griddef.__class__.__name__[:-8])
setattr(outFid, 'Notes', self.parmDict['notes'])
for k in griddef.parm_list():
setattr(outFid, k, griddef.parms[k])
# loop over fields and write all information for each field
for field in self.parmDict['inFieldNames']:
# create the dimensions in the file
extraDimSizes = self.parmDict['dimSizes'][field]
extraDimLabels = self.parmDict['dimLabels'][field]
for (size, label) in zip(extraDimSizes, extraDimLabels):
if label not in outFid.dimensions.keys():
outFid.createDimension(label, size)
# write the variable to file
vDims = ['row', 'col'] + extraDimLabels
outFieldName = self.parmDict['outFieldNames'][field]
varHand = outFid.createVariable(outFieldName, 'd', vDims, fill_value=self.parmDict['fillVal'])
varHand[:] = outputArrays[field]
# write variable attributes
setattr(varHand, 'Units', self.parmDict['outUnits'][field])
# close the output file
outFid.close()
# create an output dictionary keyed to output field names
finalOutArrays = dict()
for (k,v) in outputArrays.iteritems():
finalOutArrays[self.parmDict['outFieldNames'][k]] = v
return finalOutArrays
class unweighted_filtered_MOPITT_avg_netCDF_out_func(wght_avg_netCDF):
'''
Output averager designed to work with MOPITT Level 2 data. Emulates the
NASA developed averaging algorithm for level 3 (mostly) faithfully.
Following the NASA precedent, data are separated into either daytime or
nighttime values according to solar zenith angle. Unfortunately, since
none of the NASA documentation actually specifies what cutoff value was
used for solar zenith angle, the value is left up to the user with a
default of 85. Also, in contrast to the NASA product, only one time (day
or night) is actually included. Which one is left to the user and noted
in the attributes of the output file.
Also following NASA precedent, data are filtered based on surface type.
For cells where one surface type makes up more than 75% of the pixels,
that surface type is used exclusively. For cells where no surface type
reaches the 75% threshold, all pixels are included.
Again following the NASA algorithm, if pixels containing differing numbers
of valid levels are present in a single grid cell, only the pixels
comprising the majority are retained. This is tested using the retrieved
CO mixing ratio profile field. Note that this applies to BOTH 3D AND
NON-3D FIELDS. That is, a surface measurement will still be filtered
if the corresponding column measurement does not have the majority number
of layers present, EVEN IF NO COLUMN MEASUREMENTS ARE REQUESTED. Furthermore,
all columns are filtered based on the chosen column measurement, even
if they have information present in an 'invalid' layer.
The user is given the option of averaging each field assuming either a
normal or log-normal distribution. This is left to the user's discretion
so make sure you know which fields it is appropriate to average and which
should be log-averaged.
For further details (don't get your hopes up) on the NASA algorithm, refer
to
<NAME> (2009). MOPITT (Measurements of Pollution in the
Troposphere) Validated Version 4 Product Users Guide. Available
from <http://www.acd.ucar.edu/mopitt/products.shtml>
The output will be a netCDF file with the name determined in the standard
way. It will have the appropriate dimensions for those input fields being
processed, as well as the fundamental rows/cols determined by grid_geo.
This class subclasses the generic wght_avg_netCDF. It handles all
changes in interface in its __init__ method and lets any actual calls
filter up to the parent.
The parameters dictionary must contain the following keys:
time: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
NOTE: must be in TAI93 format
longitude: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
inFieldNames: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
outFieldNames: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
outUnits: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
logNormal: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
dimLabels: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
dimSizes: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
timeStart: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
timeStop: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
timeComparison: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
fillVal: SEE DOCUMENTATION FOR wght_avg_vals_netCDF_out_func
solZenAng: The string for the field associated with the solar zenith
angle. Must be in degrees
solZenAngCutoff: The cutoff solar zenith angle value dividing night
from day. Pixels with SZA > solZenAngCutoff will be considered
nighttime pixels. 90 is mathematically correct; values somewhat below
90 are typically used in practice. In degrees. If SZA is exactly
equal to the cutoff, it is included regardless of whether day
or night was selected.
dayTime: Boolean variable setting whether the desired output file
will be for the daytime or nighttime. If set to "True", the output
file will feature daylight retrievals only. If set to "False", the
output will feature night retrievals only. Note that by most
estimates, daylight retrievals are higher quality.
surfTypeField: The string for the field associated with the surface
type. This field is assumed to have integers corresponding to
different surface types. No effort is made to distinguish
between surface types (only to ensure they are consistent) so
the mapping of integers to physical surface types is irrelevant.
colMeasField: The string for the field associated with a column
measurement. This can be any field with exactly one extra
dimension, provided it has NaN's at the same levels as other
fields where appropriate. The canonical field to use here is
the retrieved CO mixing ratio profile.
'''
@staticmethod
def parm_list():
return ['time', 'longitude', 'inFieldNames', 'outFieldNames',
'outUnits', 'logNormal', 'dimLabels', 'dimSizes', 'timeStart',
'timeStop', 'timeComparison', 'fillVal', 'solZenAngCutoff',
'solZenAng', 'dayTime', 'surfTypeField', 'colMeasField']
@staticmethod
def required_parms():
return {'time' : ('The name of the field containing timestamps. ' \
'Timestamps are assumed to be in the TAI-93 format.' \
'\n{ MOPITT - TIME }', None),
'longitude' : ('The name of the field containing longitudes ' \
'at cell centers. Longitudes should be in ' \
'degrees east.\n{ MOPITT - Longitude }', None),
'inFieldNames' : ('The names of the fields desired to be ' \
'output. Input as comma-delimited list.', \
'list'),
'outFieldNames': ('The names of the output variables (even ' \
'if they are to be the same as input ' \
'variables). Should be a comma-delimited ' \
'list co-indexed to inFieldNames', 'list'),
'outUnits' : ('The units of the variables to be written out.' \
' Should be a comma-delimited list co-indexed '\
'to inFieldNames', 'list'),
'logNormal' : ('List of boolean strings that specify how to ' \
'take the averages of the corresponding fields.'\
' If the string is "True" that field is ' \
'averaged assuming a lognormal distribution. ' \
'If the string is "False" that field is ' \
'averaged assuming a normal distribution. ' \
'Should be a comma-delimited list co-indexed ' \
'to inFieldNames', 'list'),
'dimLabels' : ('List of names of the extra dimensions in the ' \
'output file. Must be a semicolon-delimited ' \
'list of comma-delimited strings. Fields with no '\
'extra dimensions may be left blank. ' \
'For example, if there are four inFields, the ' \
'first and third of which have no extra ' \
'dimensions, the second of which has one ("foo"),'\
' and the fourth has two ("foo" and "bar"), the '\
'dimLabels entry should look like this: '\
';foo;;foo,bar The outer (semicolon-delimited) '\
'list must be co-indexed to inFieldNames',
'listoflists'),
'dimSizes' : ('List of the sizes of the extra dimensions in the' \
' output file. Must be a semicolon-delimited list'\
' of comma-delimited lists of integers. Fields '\
'with no extra dimensions may be left blank. ' \
'For example, if there are four inFields, the ' \
'first and third of which have no extra ' \
'dimensions, the second of which has one (which ' \
'has length four), and the fourth has two (which '\
'have lengths four and five, respectively), the '\
'dimSizes entry should look like this: ;4;;4,5 ' \
'The outer (semicolon-delimited) list must be ' \
'co-indexed to inFieldNames and all sub-lists ' \
'should be the same size as the corresponding ' \
'sublist in dimLabels.', 'listoflists'),
'timeStart' : ('The earliest time for which data should be ' \
'recorded into the output file. All times ' \
'before this time in the input file(s) will ' \
'be filtered out. Must be in the format: hh:' \
'mm:ss_MM-DD-YYYY', 'time'),
'timeStop' : ('The latest time for which data should be ' \
'recorded into the output file. All times after'\
' this time in the input file(s) will be ' \
'filtered out. Must be in the format: ' \
'hh:mm:ss_MM-DD-YYYY','time'),
'timeComparison' : ('Must be set to either "local" or "UTC". '\
'Determines how the file timestamps are ' \
'compared to the start/stop time. If set '\
'to "local", then the file timestamps are ' \
'converted to local time on a pixel-by-pixel'\
' basis (using longitude to estimate time ' \
'zone) before being compared to time ' \
'boundaries. If set to "UTC" the file ' \
'timestamps (which are assumed to be in UTC)'\
' are compared against the start/stop time '\
'directly.', None),
'fillVal' : ('The value to use as a fill value in the output '\
'netCDF file. This value will replace any '\
'missing or invalid output values', 'decimal'),
'solZenAngCutoff' : ('The solar zenith angle that defines the '\
'day to night transition (we use the SZA '\
'to separate day and night pixels, which '\
'should not be averaged together), in ' \
'degrees. The geometric value here would ' \
'be 90. Recommended value is 85.',
'decimal'),
'solZenAng' : ('The name of the field containing the solar' \
' zenith angle in degrees. { MOPITT - Solar ' \
'Zenith Angle }', None),
'dayTime' : ('Boolean variable that indicates ' \
'whether the output file should contain ' \
'values from day or night. If set to ' \
'"True" the output file will have ' \
'daylight values. If set to "False" ' \
'the output file will have night ' \
'values.', 'bool'),
'surfTypeField' : ('The name of the field containing the ' \
'surface type index.\n{ MOPITT - Surface ' \
'Index }', None),
'colMeasField' : ('The name of the field containing the ' \
'column measurement that will be used to ' \
'determine which levels are valid in a ' \
'cell. Canonically the retrieved CO mixing' \
' ratio profile field. It is assumed that ' \
'the field will have a layer dimension first' \
' and a 2-element second dimension (for ' \
'values and std devs) of which we want the ' \
'first slice.\n{ MOPITT - Retrieved CO Mixing '\
'Ratio Profile }', None)}
# variable signifying which list is to act as the master list index
__userKeys__ = "inFieldNames"
def __init__(self, pDict):
'''Convert input to format of parent input'''
# make a shallow copy of the parameter dict, as we'll be making changes
# and we don't want to mutate the argument
parmDict = dict(pDict)
# even though IO interface handles casting already,
# a catchblock has been added here for safety
# in case someone wants to use this class directly
castDict = {'time':str, 'longitude':str,
'inFieldNames':list, 'outFieldNames':list,
'outUnits':list, 'logNormal':list,
'dimLabels':list, 'dimSizes':list,
'timeStart':tai93conv, 'timeStop':tai93conv,
'timeComparison':str, 'fillVal':float,
'solZenAngCutoff':float, 'solZenAng':str,
'dayTime':bool, 'surfTypeField':str,
'colMeasField':str}
for (k,func) in castDict.items():
try:
parmDict[k] = func(parmDict[k])
except TypeError:
pass
# by this point times are already converted to TAI93 standard
# no need to convert here
parmDict['timeConv'] = lambda x: x
# remove extraneous entries in parmDict. They will be incorporated in
# weighting and filtering functions
SZAcut = parmDict.pop('solZenAngCutoff')
SZAfield = parmDict.pop('solZenAng')
dayTime = parmDict.pop('dayTime')
surfField = parmDict.pop('surfTypeField')
colMeasField = parmDict.pop('colMeasField')
dayBool = dayTime
# note which was chosen
parmDict['notes'] = 'All values %s with cutoff at %6.2f' % \
('daytime' if dayBool else 'nighttime', SZAcut)
# create weighting function
def wghtFunc(parser, index, prevWght):
'''
Values are not explicitly weighted. Values not in the desired part of the
diurnal cycle (as determined by solar zenith angle) are given a weight
of 0 and are therefore not included in the final average.
'''
SZA = parser.get_cm(SZAfield, index)
if dayBool and SZA <= SZAcut:
# we want day and it's day
return 1
elif not dayBool and SZA >= SZAcut:
# we want night and it's night
return 1
else:
return 0
parmDict['weightFunction'] = wghtFunc
# create filtering function
def filterFunc(parser, indStack):
'''
Filter is twofold. First filter checks if any surface type makes
up 75% of the pixels in the cell. If it does, all other surface
types are rejected. Second filter checks if column retrievals have
different numbers of valid retrievals. If they do, then the pixels
in the minority are rejected. In the case of a tie the pixels with
more levels present are retained.
'''
# short-circuit an empty indstack because it's a common case that's
# difficult to efficiently code for
if len(indStack) == 0:
return numpy.array([])
# first filter
sTypes = numpy.array([parser.get_cm(surfField, ind) for ind in indStack]).squeeze()
uniques = numpy.unique(sTypes)
uniqueFracs = [float((sTypes == un).sum())/sTypes.size for un in uniques]
cellType = None
for (type,frac) in izip(uniques,uniqueFracs):
# at most one value can meet threshold
if frac >= .75:
cellType = type
if cellType is None:
# none met threshold, all are used
sFlag = numpy.array([False]*len(sTypes))
else:
# one met threshold
sFlag = sTypes != cellType
# second filter
columns = [parser.get_cm(colMeasField, ind)[:,0] for ind in indStack]
nValidInCol = numpy.array([col.size - numpy.isnan(col).sum() for col in columns])
uniqueNvals = set(nValidInCol)
uniqueCounts = numpy.array([(nValidInCol == val).sum() for val in uniqueNvals])
maxCount = uniqueCounts.max()
maxNVals = [nv for (nv,c) in izip(uniqueNvals,uniqueCounts) if c == maxCount]
# if there are multiples with same count, we want the highest number of valid
# values, so we take the largest
maxNVal = max(maxNVals)
cFlag = numpy.array([nValid != maxNVal for nValid in nValidInCol])
# combine the filters and return
return numpy.logical_or(cFlag, sFlag)
parmDict['filterFunction'] = filterFunc
# invoke parent's constructor
wght_avg_netCDF.__init__(self, parmDict)
``` |
{
"source": "joberthrogers18/TIL",
"score": 3
} |
#### File: python/multithreading/index.py
```python
from utils import thread_delay
import threading
import _thread
import time
def start_and_wait_thread_finish(*args):
for thr in args:
thr.start()
for thr in args:
# wait for the thread to finish before executing the next lines
thr.join()
def volume_cube(a):
print('Volume of cube: ', a**3)
def volume_square(a):
print('Volume of Square: ', a*a)
if __name__ == '__main__':
_thread.start_new_thread(thread_delay, ('t1', 1))
_thread.start_new_thread(thread_delay, ('t2', 5))
t3 = threading.Thread(target=thread_delay, args=('t3', 2))
t4 = threading.Thread(target=thread_delay, args=('t4', 3))
start_and_wait_thread_finish(t3, t4)
print('\n\nThread execution is complete!\n')
th_volume_1 = threading.Thread(target=volume_cube, args=(2,))
th_volume_2 = threading.Thread(target=volume_square, args=(3,))
start_and_wait_thread_finish(th_volume_1, th_volume_2)
print('\n\nVolumes threading are complete!\n')
time.sleep(12000)
```
#### File: python/multithreading/utils.py
```python
import time
def thread_delay(thread_name, delay):
count = 0
while count < 3:
time.sleep(delay)
count += 1
print(thread_name, '--------->', time.time())
``` |
{
"source": "jobes/modoboa-installer",
"score": 2
} |
#### File: modoboa_installer/scripts/nginx.py
```python
import os
from .. import package
from .. import system
from .. import utils
from . import base
from .uwsgi import Uwsgi
class Nginx(base.Installer):
"""Nginx installer."""
appname = "nginx"
packages = {
"deb": ["nginx", "ssl-cert"],
"rpm": ["nginx"]
}
def get_template_context(self, app):
"""Additionnal variables."""
context = super(Nginx, self).get_template_context()
context.update({
"app_instance_path": (
self.config.get(app, "instance_path")),
"uwsgi_socket_path": Uwsgi(self.config).get_socket_path(app)
})
return context
def _setup_config(self, app, hostname=None, extra_config=None):
"""Custom app configuration."""
if hostname is None:
hostname = self.config.get("general", "hostname")
context = self.get_template_context(app)
context.update({"hostname": hostname, "extra_config": extra_config})
src = self.get_file_path("{}.conf.tpl".format(app))
if package.backend.FORMAT == "deb":
dst = os.path.join(
self.config_dir, "sites-available", "{}.conf".format(hostname))
utils.copy_from_template(src, dst, context)
link = os.path.join(
self.config_dir, "sites-enabled", os.path.basename(dst))
if os.path.exists(link):
return
os.symlink(dst, link)
group = self.config.get(app, "user")
user = "www-data"
else:
dst = os.path.join(
self.config_dir, "conf.d", "{}.conf".format(hostname))
utils.copy_from_template(src, dst, context)
group = "uwsgi"
user = "nginx"
system.add_user_to_group(user, group)
def post_run(self):
"""Additionnal tasks."""
extra_modoboa_config = ""
if self.config.getboolean("automx", "enabled"):
hostname = "autoconfig.{}".format(
self.config.get("general", "domain"))
self._setup_config("automx", hostname)
extra_modoboa_config = """
location ~* ^/autodiscover/autodiscover.xml {
include uwsgi_params;
uwsgi_pass automx;
}
location /mobileconfig {
include uwsgi_params;
uwsgi_pass automx;
}
"""
if self.config.get("radicale", "enabled"):
extra_modoboa_config += """
location /radicale/ {
proxy_pass http://localhost:5232/; # The / is important!
proxy_set_header X-Script-Name /radicale;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_header Authorization;
}
"""
self._setup_config(
"modoboa", extra_config=extra_modoboa_config)
if not os.path.exists("{}/dhparam.pem".format(self.config_dir)):
cmd = "openssl dhparam -dsaparam -out dhparam.pem 4096"
utils.exec_cmd(cmd, cwd=self.config_dir)
``` |
{
"source": "jobevers/diagonal-crop",
"score": 3
} |
#### File: diagonal-crop/diagonal_crop/__init__.py
```python
from diagonal_crop.point import * # pylint: disable=wildcard-import
from diagonal_crop.util import * # pylint: disable=wildcard-import
def crop(im, base, angle, height, width):
"""Return a new, cropped image.
Args:
im: a PIL.Image instance
base: a (x,y) tuple for the upper left point of the cropped area
angle: angle, in radians, for which the cropped area should be rotated
height: height in pixels of cropped area
width: width in pixels of cropped area
"""
base = Point(*base)
points = getRotatedRectanglePoints(angle, base, height, width)
return _cropWithPoints(im, angle, points)
def _cropWithPoints(im, angle, points):
bounds = getBounds(points)
im2 = im.crop(roundint(bounds))
bound_center = getBoundsCenter(bounds)
crop_center = getCenter(im2)
# in the cropped image, this is where our points are
crop_points = [pt.recenter(bound_center, crop_center) for pt in points]
# this is where the rotated points would end up without expansion
rotated_points = [pt.rotate(crop_center, angle) for pt in crop_points]
# expand is necessary so that we don't lose any part of the picture
im3 = im2.rotate(-angle * 180 / math.pi, expand=True)
# but, since the image has been expanded, we need to recenter
im3_center = getCenter(im3)
rotated_expanded_points = [pt.recenter(crop_center, im3_center) for pt in rotated_points]
im4 = im3.crop(roundint(getBounds(rotated_expanded_points)))
return im4
``` |
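A brief usage sketch for the `crop` helper above (the file names, base point, angle, and crop size are hypothetical):
```python
import math

from PIL import Image

import diagonal_crop

im = Image.open('example.jpg')  # hypothetical input image
# crop a 100x200 pixel region whose top edge is rotated 30 degrees
cropped = diagonal_crop.crop(im, base=(50, 60), angle=math.pi / 6, height=100, width=200)
cropped.save('cropped.jpg')
```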
{
"source": "jobevers/flask-uwsgi-nginx-docker",
"score": 2
} |
#### File: flask-uwsgi-nginx-docker/app/util.py
```python
import collections
import datetime
import functools
import logging
import sys
import flask
import flask_jwt
import users
def standardConsoleHandler():
result = logging.StreamHandler(sys.stdout)
format_ = "%(asctime)s %(levelname)-8s %(name)s: %(message)s"
formatter = logging.Formatter(format_)
result.setFormatter(formatter)
return result
def configureJWT(app, db_session):
jwt = flask_jwt.JWT(app, functools.partial(users.authenticate, db_session), users.identity)
# this is nearly identical to the default except that
# they use the `id` property and I use the `id_` property
def jwt_payload_handler(user):
iat = datetime.datetime.utcnow()
exp = iat + app.config.get('JWT_EXPIRATION_DELTA')
nbf = iat + app.config.get('JWT_NOT_BEFORE_DELTA')
return {'exp': exp, 'iat': iat, 'nbf': nbf, 'identity': user.id_}
jwt.jwt_payload_handler(jwt_payload_handler)
return jwt
``` |
{
"source": "jobevers/python-keepasshttp",
"score": 3
} |
#### File: python-keepasshttp/keepasshttp/crypto.py
```python
import base64
import os
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from keepasshttp import util
KEY_SIZE = 32 # this is in bytes
IV_SIZE = 16
PAD_SIZE = 128 # in bits
def pad(data, size=PAD_SIZE): # size is in bits; 16 bytes = 128 bits
padder = padding.PKCS7(size).padder()
padded_data = padder.update(data)
return padded_data + padder.finalize()
def unpad(padded_data, size=PAD_SIZE):
unpadder = padding.PKCS7(size).unpadder()
data = unpadder.update(padded_data)
return data + unpadder.finalize()
def getCipher(key, iv):
backend = default_backend()
return Cipher(
algorithms.AES(base64.b64decode(key)),
modes.CBC(base64.b64decode(iv)),
backend
)
def encrypt(data, key, iv):
cipher = getCipher(key, iv)
encryptor = cipher.encryptor()
p = pad(data)
res = encryptor.update(p) + encryptor.finalize()
return base64.b64encode(res)
def decrypt(data, key, iv):
cipher = getCipher(key, iv)
decryptor = cipher.decryptor()
padded_data = decryptor.update(base64.b64decode(data)) + decryptor.finalize()
return unpad(padded_data)
def getRandomBytes(size):
return base64.b64encode(os.urandom(size))
def getRandomKey():
return getRandomBytes(KEY_SIZE)
def getRandomIV():
return getRandomBytes(IV_SIZE)
def encryptDict(dct, key, iv):
def _encrypt(v):
return encrypt(v, key, iv)
return util.jsonMap(_encrypt, dct)
def decryptDict(encrypted_dict, key, iv):
def _decrypt(v):
return decrypt(v, key, iv)
return util.jsonMap(_decrypt, encrypted_dict)
```
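The helpers above are symmetric: `decrypt` undoes `encrypt` given the same base64-encoded key and IV. A minimal round-trip sketch (assuming the module is importable as `keepasshttp.crypto`):
```python
from keepasshttp import crypto

key = crypto.getRandomKey()  # base64-encoded 32-byte AES key
iv = crypto.getRandomIV()    # base64-encoded 16-byte IV

ciphertext = crypto.encrypt(b'secret payload', key, iv)
assert crypto.decrypt(ciphertext, key, iv) == b'secret payload'
```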
#### File: python-keepasshttp/keepasshttp/password.py
```python
class Password(object):
"""A simple object that stores a password but prints '***'
This helps prevent accidentally printing a password to the terminal.
"""
def __init__(self, password):
self.value = password
def __str__(self):
return '*****'
def __repr__(self):
return '{}(*****)'.format(self.__class__.__name__)
def _isPassword(key):
return key.lower() == 'password'
def replace(mapping):
"""Replaces the values for keys that look like passwords"""
return {k: Password(v) if _isPassword(k) else v for k, v in mapping.items()}
``` |
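A quick illustration of the behaviour described in the `Password` docstring above (the credential values are made up):
```python
from keepasshttp.password import Password, replace

creds = replace({'login': 'bob', 'password': 'hunter2'})
print(creds['password'])        # prints ***** rather than the real value
print(creds['password'].value)  # the actual password is still accessible
```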
{
"source": "jobevers/vex",
"score": 3
} |
#### File: vex/vex/exceptions.py
```python
import sys
class InvalidArgument(Exception):
"""Raised by anything under main() to propagate errors to user.
"""
def __init__(self, message):
self.message = message
Exception.__init__(self, message)
class NoVirtualenvName(InvalidArgument):
"""No virtualenv name was given (insufficient arguments).
"""
pass
class NoVirtualenvsDirectory(InvalidArgument):
"""There is no directory to find named virtualenvs in.
"""
pass
class OtherShell(InvalidArgument):
"""The given argument to --shell-config is not recognized.
"""
pass
class UnknownArguments(InvalidArgument):
"""Unknown arguments were given on the command line.
This is a byproduct of having to use parse_known_args.
"""
pass
class InvalidVexrc(InvalidArgument):
"""config file specified or required but absent or unparseable.
"""
pass
class InvalidVirtualenv(InvalidArgument):
"""No usable virtualenv was found.
"""
pass
class InvalidCommand(InvalidArgument):
"""No runnable command was found.
"""
pass
class InvalidCwd(InvalidArgument):
"""cwd specified or required but unusable.
"""
pass
class BadConfig(InvalidArgument):
"""raised to halt on fatal conditions on the way to run.
"""
pass
class VirtualenvAlreadyMade(InvalidArgument):
"""could not make virtualenv as one already existed.
"""
pass
class VirtualenvNotMade(InvalidArgument):
"""could not make virtualenv.
"""
pass
class VirtualenvNotRemoved(InvalidArgument):
"""raised when virtualenv could not be removed.
"""
pass
if sys.version_info > (3, 3):
CommandNotFoundError = FileNotFoundError
else:
CommandNotFoundError = OSError
```
#### File: vex/vex/main.py
```python
import sys
import os
from vex import config
from vex.options import get_options
from vex.run import get_environ, run
from vex.shell_config import handle_shell_config
from vex.make import handle_make
from vex.remove import handle_remove
from vex import exceptions
from vex._version import VERSION
def get_vexrc(options, environ):
"""Get a representation of the contents of the config file.
:returns:
a Vexrc instance.
"""
# Complain if user specified nonexistent file with --config.
# But we don't want to complain just because ~/.vexrc doesn't exist.
if options.config and not os.path.exists(options.config):
raise exceptions.InvalidVexrc("nonexistent config: {0!r}".format(options.config))
filename = options.config or os.path.expanduser('~/.vexrc')
vexrc = config.Vexrc.from_file(filename, environ)
return vexrc
def get_cwd(options):
"""Discover what directory the command should run in.
"""
if not options.cwd:
return None
if not os.path.exists(options.cwd):
raise exceptions.InvalidCwd(
"can't --cwd to invalid path {0!r}".format(options.cwd))
return options.cwd
def get_virtualenv_name(options):
if options.path:
return os.path.dirname(options.path)
else:
ve_name = options.rest.pop(0) if options.rest else ''
if not ve_name:
raise exceptions.NoVirtualenvName(
"could not find a virtualenv name in the command line."
)
return ve_name
def get_virtualenv_path(ve_base, ve_name):
"""Check a virtualenv path, raising exceptions to explain problems.
"""
if not ve_base:
raise exceptions.NoVirtualenvsDirectory(
"could not figure out a virtualenvs directory. "
"make sure $HOME is set, or $WORKON_HOME,"
" or set virtualenvs=something in your .vexrc")
# Using this requires get_ve_base to pass through nonexistent dirs
if not os.path.exists(ve_base):
message = (
"virtualenvs directory {0!r} not found. "
"Create it or use vex --make to get started."
).format(ve_base)
raise exceptions.NoVirtualenvsDirectory(message)
if not ve_name:
raise exceptions.InvalidVirtualenv("no virtualenv name")
# n.b.: if ve_name is absolute, ve_base is discarded by os.path.join,
# and an absolute path will be accepted as first arg.
# So we check if they gave an absolute path as ve_name.
# But we don't want this error if $PWD == $WORKON_HOME,
# in which case 'foo' is a valid relative path to virtualenv foo.
ve_path = os.path.join(ve_base, ve_name)
if ve_path == ve_name and os.path.basename(ve_name) != ve_name:
raise exceptions.InvalidVirtualenv(
'To run in a virtualenv by its path, '
'use "vex --path {0}"'.format(ve_path))
ve_path = os.path.abspath(ve_path)
if not os.path.exists(ve_path):
raise exceptions.InvalidVirtualenv(
"no virtualenv found at {0!r}.".format(ve_path))
return ve_path
def get_command(options, vexrc, environ):
"""Get a command to run.
:returns:
a list of strings representing a command to be passed to Popen.
"""
command = options.rest
if not command:
command = vexrc.get_shell(environ)
if command and command[0].startswith('--'):
raise exceptions.InvalidCommand(
"don't put flags like '%s' after the virtualenv name."
% command[0])
if not command:
raise exceptions.InvalidCommand("no command given")
return command
def handle_version():
sys.stdout.write(VERSION + "\n")
return 0
def handle_list(ve_base, prefix=""):
if not os.path.isdir(ve_base):
sys.stderr.write("no virtualenvs directory at {0!r}\n".format(ve_base))
return 1
text = "\n".join(
sorted(
relative_path for relative_path in os.listdir(ve_base)
if (not relative_path.startswith("-"))
and relative_path.startswith(prefix)
and os.path.isdir(os.path.join(ve_base, relative_path))
)
)
sys.stdout.write(text + "\n")
return 0
def _main(environ, argv):
"""Logic for main(), with less direct system interaction.
Routines called here raise InvalidArgument with messages that
should be delivered on stderr, to be caught by main.
"""
options = get_options(argv)
if options.version:
return handle_version()
vexrc = get_vexrc(options, environ)
# Handle --shell-config as soon as its arguments are available.
if options.shell_to_configure:
return handle_shell_config(options.shell_to_configure, vexrc, environ)
if options.list is not None:
return handle_list(vexrc.get_ve_base(environ), options.list)
# Do as much as possible before a possible make, so errors can raise
# without leaving behind an unused virtualenv.
# get_virtualenv_name is destructive and must happen before get_command
cwd = get_cwd(options)
ve_base = vexrc.get_ve_base(environ)
ve_name = get_virtualenv_name(options)
command = get_command(options, vexrc, environ)
# Either we create ve_path, get it from options.path or find it
# in ve_base.
if options.make:
if options.path:
make_path = os.path.abspath(options.path)
else:
make_path = os.path.abspath(os.path.join(ve_base, ve_name))
handle_make(environ, options, make_path)
ve_path = make_path
elif options.path:
ve_path = os.path.abspath(options.path)
if not os.path.exists(ve_path) or not os.path.isdir(ve_path):
raise exceptions.InvalidVirtualenv(
"argument for --path is not a directory")
else:
try:
ve_path = get_virtualenv_path(ve_base, ve_name)
except exceptions.NoVirtualenvName:
options.print_help()
raise
# get_environ has to wait until ve_path is defined, which might
# be after a make; of course we can't run until we have env.
env = get_environ(environ, vexrc['env'], ve_path)
returncode = run(command, env=env, cwd=cwd)
if options.remove:
handle_remove(ve_path)
if returncode is None:
raise exceptions.InvalidCommand(
"command not found: {0!r}".format(command[0]))
return returncode
def main():
"""The main command-line entry point, with system interactions.
"""
argv = sys.argv[1:]
returncode = 1
try:
returncode = _main(os.environ, argv)
except exceptions.InvalidArgument as error:
if error.message:
sys.stderr.write("Error: " + error.message + '\n')
else:
raise
sys.exit(returncode)
```
#### File: vex/vex/make.py
```python
import os
import sys
import distutils.spawn
from vex.run import run
from vex import exceptions
PYDOC_SCRIPT = """#!/usr/bin/env python
from pydoc import cli
cli()
""".encode('ascii')
PYDOC_BATCH = """
@python -m pydoc %*
""".encode('ascii')
def handle_make(environ, options, make_path):
if os.path.exists(make_path):
# Can't ignore existing virtualenv happily because existing one
# might have different parameters and --make implies nonexistent
raise exceptions.VirtualenvAlreadyMade(
"virtualenv already exists: {0!r}".format(make_path)
)
ve_base = os.path.dirname(make_path)
if not os.path.exists(ve_base):
os.mkdir(ve_base)
elif not os.path.isdir(ve_base):
raise exceptions.VirtualenvNotMade(
"could not make virtualenv: "
"{0!r} already exists but is not a directory. "
"Choose a different virtualenvs path using ~/.vexrc "
"or $WORKON_HOME, or remove the existing file; "
"then rerun your vex --make command.".format(ve_base)
)
# TODO: virtualenv is usually not on PATH for Windows,
# but finding it is a terrible issue.
if os.name == 'nt' and not os.environ.get('VIRTUAL_ENV', ''):
ve = os.path.join(
os.path.dirname(sys.executable),
'Scripts',
'virtualenv'
)
else:
ve = 'virtualenv'
args = [ve, make_path]
if options.python:
if os.name == 'nt':
python = distutils.spawn.find_executable(options.python)
if python:
options.python = python
args += ['--python', options.python]
if options.site_packages:
args += ['--system-site-packages']
if options.always_copy:
args += ['--always-copy']
returncode = run(args, env=environ, cwd=ve_base)
if returncode != 0:
raise exceptions.VirtualenvNotMade("error creating virtualenv")
if os.name != 'nt':
pydoc_path = os.path.join(make_path, 'bin', 'pydoc')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_SCRIPT)
perms = os.stat(pydoc_path).st_mode
os.chmod(pydoc_path, perms | 0o0111)
else:
pydoc_path = os.path.join(make_path, 'Scripts', 'pydoc.bat')
with open(pydoc_path, 'wb') as out:
out.write(PYDOC_BATCH)
``` |
{
"source": "jobez/tinkerpop",
"score": 2
} |
#### File: tests/driver/test_driver_remote_connection.py
```python
import pytest
from tornado import ioloop, gen
from gremlin_python import statics
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.statics import long
from gremlin_python.driver.driver_remote_connection import (
DriverRemoteConnection)
from gremlin_python.process.traversal import Traverser
from gremlin_python.process.traversal import TraversalStrategy
from gremlin_python.process.traversal import Bindings
from gremlin_python.process.traversal import P
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.structure.graph import Vertex
from gremlin_python.process.strategies import SubgraphStrategy, ReservedKeysVerificationStrategy
__author__ = '<NAME> (http://markorodriguez.com)'
class TestDriverRemoteConnection(object):
def test_traversals(self, remote_connection):
statics.load_statics(globals())
g = traversal().withRemote(remote_connection)
assert long(6) == g.V().count().toList()[0]
# #
assert Vertex(1) == g.V(1).next()
assert 1 == g.V(1).id().next()
assert Traverser(Vertex(1)) == g.V(1).nextTraverser()
assert 1 == len(g.V(1).toList())
assert isinstance(g.V(1).toList(), list)
results = g.V().repeat(out()).times(2).name
results = results.toList()
assert 2 == len(results)
assert "lop" in results
assert "ripple" in results
# #
assert 10 == g.V().repeat(both()).times(5)[0:10].count().next()
assert 1 == g.V().repeat(both()).times(5)[0:1].count().next()
assert 0 == g.V().repeat(both()).times(5)[0:0].count().next()
assert 4 == g.V()[2:].count().next()
assert 2 == g.V()[:2].count().next()
# #
results = g.withSideEffect('a', ['josh', 'peter']).V(1).out('created').in_('created').values('name').where(P.within('a')).toList()
assert 2 == len(results)
assert 'josh' in results
assert 'peter' in results
# #
results = g.V().out().profile().toList()
assert 1 == len(results)
assert 'metrics' in results[0]
assert 'dur' in results[0]
# #
results = g.V().has('name', 'peter').as_('a').out('created').as_('b').select('a', 'b').by(
__.valueMap()).toList()
assert 1 == len(results)
assert 'peter' == results[0]['a']['name'][0]
assert 35 == results[0]['a']['age'][0]
assert 'lop' == results[0]['b']['name'][0]
assert 'java' == results[0]['b']['lang'][0]
assert 2 == len(results[0]['a'])
assert 2 == len(results[0]['b'])
# #
results = g.V(1).inject(g.V(2).next()).values('name').toList()
assert 2 == len(results)
assert 'marko' in results
assert 'vadas' in results
# #
results = g.V().has('person', 'name', 'marko').map(lambda: ("it.get().value('name')", "gremlin-groovy")).toList()
assert 1 == len(results)
assert 'marko' in results
# #
# this test just validates that the underscored versions of steps conflicting with Gremlin work
# properly and can be removed when the old steps are removed - TINKERPOP-2272
results = g.V().filter_(__.values('age').sum_().and_(
__.max_().is_(gt(0)), __.min_().is_(gt(0)))).range_(0, 1).id_().next()
assert 1 == results
# #
# test binding in P
results = g.V().has('person', 'age', Bindings.of('x', lt(30))).count().next()
assert 2 == results
def test_lambda_traversals(self, remote_connection):
statics.load_statics(globals())
assert "remoteconnection[ws://localhost:45940/gremlin,gmodern]" == str(remote_connection)
g = traversal().withRemote(remote_connection)
assert 24.0 == g.withSack(1.0, lambda: ("x -> x + 1", "gremlin-groovy")).V().both().sack().sum().next()
assert 24.0 == g.withSack(lambda: ("{1.0d}", "gremlin-groovy"), lambda: ("x -> x + 1", "gremlin-groovy")).V().both().sack().sum().next()
assert 48.0 == g.withSack(1.0, lambda: ("x, y -> x + y + 1", "gremlin-groovy")).V().both().sack().sum().next()
assert 48.0 == g.withSack(lambda: ("{1.0d}", "gremlin-groovy"), lambda: ("x, y -> x + y + 1", "gremlin-groovy")).V().both().sack().sum().next()
def test_iteration(self, remote_connection):
statics.load_statics(globals())
g = traversal().withRemote(remote_connection)
t = g.V().count()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert 6 == t.next()
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
t = g.V().has('name', P.within('marko', 'peter')).values('name').order()
assert "marko" == t.next()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert t.hasNext()
assert "peter" == t.next()
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
assert not(t.hasNext())
try:
t.next()
assert False
except StopIteration:
assert True
def test_strategies(self, remote_connection):
statics.load_statics(globals())
g = traversal().withRemote(remote_connection). \
withStrategies(TraversalStrategy("SubgraphStrategy",
{"vertices": __.hasLabel("person"),
"edges": __.hasLabel("created")},
"org.apache.tinkerpop.gremlin.process.traversal.strategy.decoration.SubgraphStrategy"))
assert 4 == g.V().count().next()
assert 0 == g.E().count().next()
assert 1 == g.V().label().dedup().count().next()
assert 4 == g.V().filter(lambda: ("x -> true", "gremlin-groovy")).count().next()
assert "person" == g.V().label().dedup().next()
#
g = traversal().withRemote(remote_connection). \
withStrategies(SubgraphStrategy(vertices=__.hasLabel("person"), edges=__.hasLabel("created")))
assert 4 == g.V().count().next()
assert 0 == g.E().count().next()
assert 1 == g.V().label().dedup().count().next()
assert "person" == g.V().label().dedup().next()
#
g = traversal().withRemote(remote_connection). \
withStrategies(SubgraphStrategy(edges=__.hasLabel("created")))
assert 6 == g.V().count().next()
assert 4 == g.E().count().next()
assert 1 == g.E().label().dedup().count().next()
assert "created" == g.E().label().dedup().next()
#
g = g.withoutStrategies(SubgraphStrategy). \
withComputer(vertices=__.has("name", "marko"), edges=__.limit(0))
assert 1 == g.V().count().next()
assert 0 == g.E().count().next()
assert "person" == g.V().label().next()
assert "marko" == g.V().name.next()
#
g = traversal().withRemote(remote_connection).withComputer()
assert 6 == g.V().count().next()
assert 6 == g.E().count().next()
#
g = traversal().withRemote(remote_connection). \
withStrategies(ReservedKeysVerificationStrategy(throw_exception=True))
try:
g.addV("person").property("id", "please-don't-use-id").iterate()
assert False
except GremlinServerError as gse:
assert gse.status_code == 500
def test_clone(self, remote_connection):
g = traversal().withRemote(remote_connection)
t = g.V().count()
assert 6 == t.next()
assert 6 == t.clone().next()
assert 6 == t.clone().next()
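# Illustrative only (not part of the original source): the remote_connection
# fixture used by these tests is typically constructed roughly as below; the
# host/port match the URL asserted in test_lambda_traversals, but the exact
# fixture wiring is an assumption.
#
# remote_connection = DriverRemoteConnection('ws://localhost:45940/gremlin', 'gmodern')
# try:
#     g = traversal().withRemote(remote_connection)
#     print(g.V().count().next())
# finally:
#     remote_connection.close()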
``` |
{
"source": "job-hax/agentv1",
"score": 3
} |
#### File: src/accounts/linkedin_utils.py
```python
import requests
from bs4 import BeautifulSoup as bs
from bs4.element import Comment, NavigableString
from .gmail_utils import find_nth
from django.core import serializers
from datetime import datetime
def parse_job_detail(body):
"""Parse html body and get job posting details
Args:
body: email html body
Returns:
String values which represent details of job post in JSON format.
"""
try:
#start = datetime.utcnow().timestamp()
#print(start)
link = body[find_nth(body, 'https://www.linkedin.com/comm/jobs/view/', 1) : find_nth(body, '?trk', 1)]
print(link)
url = requests.get(link)
htmltext = url.text
s = find_nth(htmltext, '<code id="viewJobMetaTagModule">', 1)
e = htmltext.rfind('--></code>') + 10
plainData = htmltext[s : e]
plainData = plainData.replace('<!--','')
plainData = plainData.replace('-->','')
#print(plainData)
soup = bs(plainData, "html.parser")
try:
posterInformation = soup.find('code', id='posterInformationModule')
posterInformationJSON = posterInformation.getText()
except:
posterInformationJSON = '{}'
try:
decoratedJobPosting = soup.find('code', id='decoratedJobPostingModule')
decoratedJobPostingJSON = decoratedJobPosting.getText()
except:
decoratedJobPostingJSON = '{}'
try:
topCardV2 = soup.find('code', id='topCardV2Module')
topCardV2JSON = topCardV2.getText()
except:
topCardV2JSON = '{}'
#end = datetime.utcnow().timestamp()
#print(end)
return posterInformationJSON, decoratedJobPostingJSON, topCardV2JSON
except Exception as e:
print(e)
return '{}','{}','{}'
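# Illustrative only (not part of the original source): a hypothetical call with a
# fabricated email body containing a LinkedIn job link; the job id and tracking
# parameter are placeholders.
#
# body = '... https://www.linkedin.com/comm/jobs/view/123456789?trk=eml ...'
# poster_json, posting_json, top_card_json = parse_job_detail(body)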
```
#### File: accounts/tests/test_views.py
```python
from django.test import TestCase
from django.urls import reverse
import json
# Create your tests here.
from accounts.models import ApplicationStatus, JobApplication, Profile, JobPostDetail
from django.contrib.auth.models import User
class LoginViewTest(TestCase):
def setUp(self):
test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
test_user1.save()
def test_login(self):
response = self.client.post(reverse('login'), {'username': 'testuser1', 'password': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/dashboard')
class RegisterViewTest(TestCase):
def test_register(self):
response = self.client.post(reverse('register'), {'first_name': 'firstname',
'last_name': 'lastname','username': 'usernametest','email': '<EMAIL>','password': '<PASSWORD>'
, 'password2': '<PASSWORD>'})
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login')
class AddJobApplicationViewTest(TestCase):
def setUp(self):
status = ApplicationStatus.objects.create(pk=1, value='N/A')
status.save()
test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
test_user1.save()
login = self.client.login(username='testuser1', password='<PASSWORD>')
def test_add_job_application(self):
response = self.client.post(reverse('addJobApplication'), json.dumps({"job_title":"test","company":"testcompany","applicationdate":"2018-01-01","status":"1","source":"testsource"}),content_type='application/json')
self.assertEqual(response.status_code, 200)
class ProfileViewTest(TestCase):
def setUp(self):
test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
test_user1.save()
login = self.client.login(username='testuser1', password='<PASSWORD>')
def test_open_profile(self):
response = self.client.post(reverse('profile'))
self.assertEqual(response.status_code, 200)
```
#### File: src/accounts/views.py
```python
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from .models import JobApplication
from .models import ApplicationStatus
from .models import Profile
from .models import JobPostDetail
from .gmail_lookup import fetchJobApplications
from .linkedin_lookup import get_profile
from django.http import HttpResponseRedirect
from background_task import background
from django.db.models import Q
import datetime
from dateutil import tz
from django.db.models import Count
from django.core import serializers
import json
from django.http import JsonResponse
def register(request):
if request.method == 'POST':
# Get form values
first_name = request.POST['first_name']
last_name = request.POST['last_name']
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
        password2 = request.POST['password2']
        # Check if passwords match
        if password == password2:
# Check username
if User.objects.filter(username=username).exists():
messages.error(request, 'That username is taken')
return redirect('register')
else:
if User.objects.filter(email=email).exists():
messages.error(request, 'That email is being used')
return redirect('register')
else:
# Looks good
user = User.objects.create_user(username=username, password=password,email=email, first_name=first_name, last_name=last_name)
# Login after register
# auth.login(request, user)
# messages.success(request, 'You are now logged in')
# return redirect('index')
user.save()
messages.success(request, 'You are now registered and can log in')
return redirect('login')
else:
messages.error(request, 'Passwords do not match')
return redirect('register')
else:
return render(request, 'accounts/register.html')
def update_user(request):
user = request.user
user.username = request.POST['newusername']
user.first_name = request.POST['newuserfirstname']
user.last_name = request.POST['newuserlastname']
user.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def delete_account(request):
request.user.delete()
auth.logout(request)
return redirect('index')
def login(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
return redirect('dashboard')
else:
messages.error(request, 'Invalid credentials')
return redirect('login')
else:
return render(request, 'accounts/login.html')
def logout(request):
if request.method == 'POST':
auth.logout(request)
return redirect('index')
def social_user(backend, uid, user=None, *args, **kwargs):
    '''OVERRIDDEN: logs out the current user
    instead of raising an exception.'''
provider = backend.name
social = backend.strategy.storage.user.get_social_auth(provider, uid)
if social:
if user and social.user != user:
logout(backend.strategy.request)
#msg = 'This {0} account is already in use.'.format(provider)
#raise AuthAlreadyAssociated(backend, msg)
elif not user:
user = social.user
return {'social': social,
'user': user,
'is_new': user is None,
'new_association': False}
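# Illustrative only (not part of the original source): this override is meant to
# replace the default social_user step in python-social-auth's pipeline, roughly
# as in the settings sketch below (the module path 'accounts.views.social_user'
# is an assumption about this project's layout).
#
# SOCIAL_AUTH_PIPELINE = (
#     'social_core.pipeline.social_auth.social_details',
#     'social_core.pipeline.social_auth.social_uid',
#     'social_core.pipeline.social_auth.auth_allowed',
#     'accounts.views.social_user',          # <- this override
#     'social_core.pipeline.user.get_username',
#     'social_core.pipeline.user.create_user',
#     'social_core.pipeline.social_auth.associate_user',
#     'social_core.pipeline.social_auth.load_extra_data',
#     'social_core.pipeline.user.user_details',
# )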
def updateJobApplication(request):
if request.method == 'POST':
user_job_app = JobApplication.objects.get(pk=request.POST['pk'])
status = request.POST['ddStatus']
user_job_app.applicationStatus = ApplicationStatus.objects.get(pk=status)
user_job_app.save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
return dashboard(request)
def deleteJobApplication(request):
if request.method == 'POST':
user_job_app = JobApplication.objects.get(pk=request.POST['pk'])
user_job_app.delete()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
else:
return dashboard(request)
@background(schedule=1)
def scheduleFetcher(user_id):
user = User.objects.get(pk=user_id)
if user.social_auth.filter(provider='google-oauth2'):
fetchJobApplications(user)
def getStatuses(request):
statuses = ApplicationStatus.objects.all()
data = serializers.serialize("json", statuses)
return JsonResponse(data)
def dashboard(request):
user_job_apps = JobApplication.objects.filter(user_id=request.user.id).order_by('-applyDate')
statuses = ApplicationStatus.objects.all()
if len(statuses) == 0:
dummyStatus = ApplicationStatus(value = 'N/A')
dummyStatus.save()
dummyStatus = ApplicationStatus(value = 'Planning')
dummyStatus.save()
dummyStatus = ApplicationStatus(value = 'In Progress')
dummyStatus.save()
dummyStatus = ApplicationStatus(value = 'Offer')
dummyStatus.save()
dummyStatus = ApplicationStatus(value = 'Fail')
dummyStatus.save()
statuses = ApplicationStatus.objects.all()
#it'll be used for background tasking in production
#refs. https://medium.com/@robinttt333/running-background-tasks-in-django-f4c1d3f6f06e
#https://django-background-tasks.readthedocs.io/en/latest/
#https://stackoverflow.com/questions/41205607/how-to-activate-the-process-queue-in-django-background-tasks
#scheduleFetcher.now(request.user.id)
scheduleFetcher(request.user.id)
if request.user.social_auth.filter(provider='linkedin-oauth2'):
get_profile(request.user)
profile = Profile.objects.get(user_id= request.user.id)
if(profile.gmail_last_update_time == 0):
last_sync_time = "Syncing..."
else:
last_sync_time = datetime.datetime.utcfromtimestamp(profile.gmail_last_update_time)
context = {
'job_apps': user_job_apps,
'last_sync_time': last_sync_time,
'statuses': statuses
}
return render(request, 'accounts/dashboard.html', context)
def addJobApplication(request):
if request.method == 'POST':
body = json.loads(request.body)
job_title = body['job_title']
company = body['company']
applicationdate = body['applicationdate']
status = int(body['status'])
source = body['source']
japp = JobApplication(jobTitle=job_title, company=company, applyDate=applicationdate, msgId='', source =source, user = request.user, companyLogo = '/static/images/JobHax-logo-black.svg')
japp.applicationStatus = ApplicationStatus.objects.get(pk=status)
japp.save()
return JsonResponse({'success':True})
def filterJobApplications(request):
if request.method == 'POST':
start = request.POST['start']
end = request.POST['end']
query = JobApplication.objects.filter(user_id=request.user.id)
if start != '':
query = query.filter(applyDate__gte=start)
if end != '':
query = query.filter(applyDate__lte=end)
user_job_apps = query.order_by('-applyDate')
statuses = ApplicationStatus.objects.all()
profile = Profile.objects.get(user_id = request.user.id)
if(profile.gmail_last_update_time == 0):
last_sync_time = "Syncing..."
else:
last_sync_time = datetime.datetime.fromtimestamp(profile.gmail_last_update_time)
context = {
'job_apps': user_job_apps,
'last_sync_time': last_sync_time,
'statuses': statuses
}
return render(request, 'accounts/dashboard.html', context)
else:
return dashboard(request)
def metrics(request):
context = {
}
return render(request, 'accounts/metrics.html', context)
def job_board(request):
context = {
}
return render(request, 'accounts/job_board.html', context)
def profile(request):
context = {
}
if request.user.social_auth.filter(provider='google-oauth2'):
context['google'] = True
else:
context['google'] = False
if request.user.social_auth.filter(provider='linkedin-oauth2'):
context['linkedin'] = True
profile = Profile.objects.get(user_id= request.user.id)
context['linkedin_info'] = json.dumps(profile.linkedin_info)
else:
context['linkedin'] = False
return render(request, 'accounts/profile.html', context)
def settings(request):
context = {
}
return render(request, 'accounts/settings.html', context)
def jobdetails(request):
context = {
}
return render(request, 'accounts/jobdetails.html', context)
def wordcloud(request):
context = {
}
return render(request, 'accounts/metrics/wordcloud.html', context)
def get_job_detail(request):
id = request.GET['pk']
app = JobApplication.objects.all().get(pk = id)
try:
details = JobPostDetail.objects.all().get(job_post = app)
context = {
'posterInformation': json.dumps(details.posterInformation),
'decoratedJobPosting': json.dumps(details.decoratedJobPosting),
'topCardV2': json.dumps(details.topCardV2),
'job': app
}
except:
context = {
'posterInformation': '{}',
'decoratedJobPosting': '{}',
'topCardV2': '{}',
'job': app
}
print(context)
return render(request, 'accounts/jobdetails.html', context)
def get_total_application_count(request):
count = JobApplication.objects.filter(user_id=request.user.id).count()
return JsonResponse({'count':count})
def get_application_count_by_month(request):
response = []
sources = ['Hired.com','LinkedIn','Indeed', 'Others']
for i in sources:
if i != 'Others':
appsByMonths = JobApplication.objects.filter(user_id=request.user.id,source=i,applyDate__year='2018').values('applyDate__year', 'applyDate__month').annotate(count=Count('pk'))
else:
appsByMonths = JobApplication.objects.filter(~Q(source = 'LinkedIn'),~Q(source = 'Hired.com'),~Q(source = 'Indeed'),user_id=request.user.id,applyDate__year='2018').values('applyDate__year', 'applyDate__month').annotate(count=Count('pk'))
item = {}
item['source'] = i
data = [0] * 12
for app in appsByMonths:
data[app['applyDate__month'] - 1] = app['count']
item['data'] = data
response.append(item)
return JsonResponse(response, safe=False)
def get_application_count_by_month_with_total(request):
response = []
sources = ['Hired.com','LinkedIn','Indeed', 'Others', 'Total']
for i in sources:
if i == 'Total':
appsByMonths = JobApplication.objects.filter(user_id=request.user.id,applyDate__year='2018').values('applyDate__year', 'applyDate__month').annotate(count=Count('pk'))
elif i != 'Others':
appsByMonths = JobApplication.objects.filter(user_id=request.user.id,source=i,applyDate__year='2018').values('applyDate__year', 'applyDate__month').annotate(count=Count('pk'))
else:
appsByMonths = JobApplication.objects.filter(~Q(source = 'LinkedIn'),~Q(source = 'Hired.com'),~Q(source = 'Indeed'),user_id=request.user.id,applyDate__year='2018').values('applyDate__year', 'applyDate__month').annotate(count=Count('pk'))
item = {}
item['source'] = i
data = [0] * 12
for app in appsByMonths:
data[app['applyDate__month'] - 1] = app['count']
item['data'] = data
response.append(item)
return JsonResponse(response, safe=False)
def get_count_by_statuses(request):
statuses = JobApplication.objects.filter(~Q(applicationStatus = None),user_id=request.user.id).values('applicationStatus').annotate(count=Count('pk'))
response = []
for i in statuses:
item = {}
item['name'] = ApplicationStatus.objects.get(pk=i['applicationStatus']).value
item['value'] = i['count']
response.append(item)
return JsonResponse(response, safe=False)
def get_word_count(request):
response = []
"""
job_titles = JobApplication.objects.filter(user_id=request.user.id).values('jobTitle').annotate(count=Count('pk'))
for i in job_titles:
item = {}
item['word'] = i['jobTitle']
item['value'] = i['count']
response.append(item)
statuses = JobApplication.objects.filter(~Q(applicationStatus__value = 'N/A'),user_id=request.user.id).values('applicationStatus').annotate(count=Count('pk'))
for i in statuses:
item = {}
item['word'] = ApplicationStatus.objects.get(pk=i['applicationStatus']).value
item['value'] = i['count']
response.append(item)
"""
companies = JobApplication.objects.filter(user_id=request.user.id).values('company').annotate(count=Count('pk'))
for i in companies:
item = {}
item['word'] = i['company']
item['value'] = i['count']
response.append(item)
"""
sources = JobApplication.objects.filter(user_id=request.user.id).values('source').annotate(count=Count('pk'))
for i in sources:
item = {}
item['word'] = i['source']
item['value'] = i['count']
response.append(item)
"""
return JsonResponse(response, safe=False)
def get_count_by_jobtitle_and_statuses(request):
response = {}
job_titles = JobApplication.objects.filter(~Q(applicationStatus = None),user_id=request.user.id).values('jobTitle').annotate(count=Count('pk'))
jobs = []
statuses_data = []
status_data = []
for job_title in job_titles:
jobs.append(job_title['jobTitle'])
response['jobs'] = jobs
statuses = ApplicationStatus.objects.all()
for status in statuses:
statuses_data.append(status.value)
item = {}
item['name'] = status.value
data = [0] * len(job_titles)
for i,job_title in enumerate(job_titles):
data[i] = JobApplication.objects.filter(user_id=request.user.id, jobTitle=job_title['jobTitle'], applicationStatus=status).count()
item['data'] = data
status_data.append(item)
response['statuses'] = statuses_data
response['data'] = status_data
return JsonResponse(response, safe=False)
```
#### File: src/pages/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
context = {
#put your content for binding to html
}
return render(request, 'pages/index.html', context)
def about(request):
context = {
#put your content for binding to html
}
return render(request, 'pages/about.html', context)
``` |
{
"source": "job-hax/be_ai",
"score": 4
} |
#### File: be_ai/src/data_loader.py
```python
import csv
import numpy as np
def load_data(filename,data_type):
# load the data from a .csv file named as filename
a = []
with open(filename) as csv_file:
data_file = csv.reader(csv_file)
for row in data_file:
a.append(row)
# convert to numpy array
arr = np.array(a)
# load the training data
if data_type=='train':
# separate the example data X (without ID column) and the label data y
X = arr[1:, 1:-1]
y = arr[1:, -1]
return X, y
elif data_type=='test':
# the partial dataset Y (w/o the ID and label columns)
Y = arr[1:, 1:-1]
# return the entire dataset arr and the partial dataset Y
return arr, Y
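# Illustrative only (not part of the original source): expected usage, assuming
# CSV files with a header row, an ID column first and (for training data) the
# label in the last column; the file names are hypothetical.
#
# X_train, y_train = load_data('train.csv', 'train')
# full_test, X_test = load_data('test.csv', 'test')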
```
#### File: be_ai/src/decision_tree_classifier.py
```python
from sklearn import tree
# supervised learning (classification/prediction)
# decision tree classification/prediction
# Splits are made on the most significant attributes (independent variables) so the resulting groups are as distinct as possible
def DecisionTree(x_train, y_train, x_test, criterion_name):
# x_train - train input values
# y_train - train target values
# x_test - test input values
# Default criterion is gini; otherwise, entropy to create Decision Tree
model = tree.DecisionTreeClassifier(criterion = criterion_name)
# Train model
model.fit(x_train, y_train)
# Predict
y_test=model.predict(x_test)
return y_test
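# Illustrative only (not part of the original source): a tiny smoke test with
# made-up data showing the expected call shape.
if __name__ == '__main__':
    x_train = [[0, 0], [1, 1], [2, 2], [3, 3]]
    y_train = [0, 0, 1, 1]
    x_test = [[0.5, 0.5], [2.5, 2.5]]
    print(DecisionTree(x_train, y_train, x_test, 'gini'))  # e.g. [0 1]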
```
#### File: be_ai/src/svm_classifier.py
```python
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def SVM_Classifier(kernel_name,x_train,y_train,x_test,x_train_num,x_test_num):
# x_train - train input values
# y_train - train target values
# x_test - test input values
# Create SVM classification object
model = svm.SVC(kernel=kernel_name)
for i in range(10):
valid_set_size = 0.10
# divide the original training set into training set and validation set
XTrain, XTest, yTrain, yTest = train_test_split(x_train_num, y_train, test_size=valid_set_size)
# Train the model
model.fit(XTrain, yTrain)
# Use the model to predict on the test set
yPred = model.predict(XTest)
print('the validation set size: ' + str(valid_set_size))
# Get accuracy
score = accuracy_score(yTest, yPred)
print('the validation accuracy: ' + str(score))
y_test = model.predict(x_test_num)
return y_test
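# Illustrative only (not part of the original source): note that only the *_num
# arguments are actually fed to the model above; x_train and x_test are accepted
# but unused. A hypothetical call with made-up numeric data:
#
# y_pred = SVM_Classifier('rbf', x_train_raw, y_train, x_test_raw,
#                         x_train_num, x_test_num)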
``` |
{
"source": "Jobhdez/classes",
"score": 4
} |
#### File: Jobhdez/classes/lalg.py
```python
from math import sqrt
from operator import (
add,
sub,
mul,
)
class Number:
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.sub(other)
def __mul__(self, other):
return self.mul(other)
class Vector(Number):
def add(self, other):
"""
Vector Vector -> Vector
given two Vectors 'add' adds them together.
given: Vector([2,3,5]) + Vector([4,5,6])
expect: Vector([6,8,11])
"""
if isinstance(other, Vector):
return [s + t for s, t in zip(self.contents, other.contents)]
else:
raise ValueError("{} is not type Vector.".format(other))
def sub(self, other):
"""
Vector Vector -> Vector
        given two Vectors 'sub' subtracts them.
given: Vector([2,3,5]) Vector([4,6,7])
expect: Vector([-2,-3,-2])
"""
if isinstance(other, Vector):
return [s - t for s, t in zip(self.contents, other.contents)]
else:
raise ValueError("{} is not type Vector.".format(other))
def mul(self, other):
"""
Vector Vector -> Scalar or Vector Scalar -> Vector
computes the DOT-PRODUCT of the two given Vectors if other is of type VECTOR;
otherwise, if other is of type INT, it multiplies Vector by a scalar.
given: Vector([-6, 8]) * Vector([5, 12])
expect: 66
given: Vector([2,3,4,5]) * 3
expect: [6,9,12,15]
"""
if isinstance(other, Vector):
return sum([t * u for t, u in zip(self.contents, other.contents)])
elif isinstance(other, int):
return [other * t for t in self.contents]
else:
raise ValueError("{} is not of type VECTOR or INT.".format(other))
class M(Number):
def add(self, other):
"""
M M -> M
returns the SUM of the two given matrices.
given: M([[2,3,5], [5,6,7]]) + M([[3,4,5], [5,6,7]])
expect: [[5,7,10], [10,12,14]]
"""
if isinstance(other, M):
return [list(map(add,t,u)) for t, u in zip(self.contents, other.contents)]
else:
raise ValueError("{} is not of type M.".format(other))
def sub(self, other):
"""
M M -> M
returns the DIFFERENCE of the given two matrices.
given: M([[4,5,6], [5,6,7]]) - M([[2,3,4], [2,3,4]])
expect: [[2,2,2], [3,3,3]]
"""
if isinstance(other, M):
return [list(map(sub, t, u)) for t, u in zip(self.contents, other.contents)]
else:
raise ValueError("{} is not of type M.".format(other))
def mul(self, other):
"""
M M -> M or M Scalar -> M
returns the product of two matrices if the type of other is M; otherwise
        if the type of other is INT then it returns the matrix multiplied by a scalar.
given: M([[1,2,3], [4,5,6]]) * M([[7,8], [9,10], [11,12]])
expect: [[58,64], [139, 154]]
        given: M([[2,3,4], [3,4,5]]) * 4
expect: [[8,12,16], [12,16,20]]
"""
if isinstance(other, M):
return [compute_ith_vector(self.contents, other.contents, i) for i in range(len(self.contents))]
elif isinstance(other, int):
return [[other * i for i in x] for x in self.contents]
else:
raise ValueError("{} is not of type M.".format(other))
class Vec(Vector):
def __init__(self, contents):
self.contents = contents
def magnitude(self):
"""
Vec -> Scalar
given a Vec 'magnitude' returns the magnitude.
given: Vec([6,8])
expect: 10
"""
return sqrt(sum([x * x for x in self.contents]))
def is_unit_vector(self):
"""
Vector -> Bool
check if the Vector is a UNIT-VECTOR.
given: Vec([6,8])
expect: False
given: Vec([1,0,0])
expect: True
"""
return self.magnitude() == 1
class Matrix(M):
def __init__(self, contents):
self.contents = contents
def transpose(self):
return [get_first(self.contents, i) for i in range(len(self.contents[0]))]
def compute_ith_vector(m, m2, i):
return [sum([a * b for a, b in zip(m[i], get_column(m2, j))])
for j in range(len(m2[0]))]
def get_column(m, index):
c = []
for k in range(len(m)):
c.append(m[k][index])
return c
def get_first(m, index):
firsts = []
for i in range(len(m)):
firsts.append(m[i][index])
return firsts
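# Illustrative only (not part of the original source): a short demo mirroring
# the docstring examples above.
if __name__ == '__main__':
    print(Vec([2, 3, 5]) + Vec([4, 5, 6]))        # [6, 8, 11]
    print(Vec([-6, 8]) * Vec([5, 12]))            # 66
    print(Vec([6, 8]).magnitude())                # 10.0
    print(Matrix([[1, 2, 3], [4, 5, 6]]) * Matrix([[7, 8], [9, 10], [11, 12]]))
    # [[58, 64], [139, 154]]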
``` |
{
"source": "Jobhdez/mnsit_classifier",
"score": 3
} |
#### File: src/model/net.py
```python
import torch
import torch.nn as nn
import torch.nn.init as init
class Net(nn.Module):
"""A representation of a convolutional neural network comprised of VGG blocks."""
def __init__(self, n_channels):
super(Net, self).__init__()
# VGG block 1
self.conv1 = nn.Conv2d(n_channels, 64, (3,3))
self.act1 = nn.ReLU()
self.pool1 = nn.MaxPool2d((2,2), stride=(2,2))
self.dropout = nn.Dropout(0.2)
# VGG block 2
self.conv2 = nn.Conv2d(64, 64, (3,3))
self.act2 = nn.ReLU()
self.pool2 = nn.MaxPool2d((2,2), stride=(2,2))
self.dropout2 = nn.Dropout(0.2)
# VGG block 3
self.conv3 = nn.Conv2d(64, 128, (3,3))
self.act3 = nn.ReLU()
self.pool3 = nn.MaxPool2d((2,2), stride=(2,2))
self.dropout3 = nn.Dropout(0.2)
# Fully connected layer
self.f1 = nn.Linear(128 * 1 * 1, 1000)
self.dropout4 = nn.Dropout(0.5)
self.act4 = nn.ReLU()
# Output layer
self.f2 = nn.Linear(1000, 10)
self.act5 = nn.Softmax(dim=1)
def forward(self, X):
"""This function forward propagates the input."""
# VGG block 1
X = self.conv1(X)
X = self.act1(X)
X = self.pool1(X)
X = self.dropout(X)
# VGG block 2
X = self.conv2(X)
X = self.act2(X)
X = self.pool2(X)
X = self.dropout2(X)
# VGG block 3
X = self.conv3(X)
X = self.act3(X)
X = self.pool3(X)
X = self.dropout3(X)
# Flatten
X = X.view(-1, 128)
# Fully connected layer
X = self.f1(X)
X = self.act4(X)
X = self.dropout4(X)
# Output layer
X = self.f2(X)
X = self.act5(X)
return X
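# Illustrative only (not part of the original source): a shape check with a
# random MNIST-sized batch (1x28x28 inputs), confirming the flatten size of 128.
if __name__ == '__main__':
    model = Net(n_channels=1)
    dummy = torch.rand(4, 1, 28, 28)
    print(model(dummy).shape)  # torch.Size([4, 10])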
```
#### File: mnsit_classifier/src/train.py
```python
import datetime
def training_loop(
n_epochs,
optimizer,
model,
loss_fn,
train_loader,
device):
for epoch in range(1, n_epochs + 1):
loss_train = 0.0
for i, (imgs, labels) in enumerate(train_loader):
imgs = imgs.to(device=device)
labels = labels.to(device=device)
outputs = model(imgs)
loss = loss_fn(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_train += loss.item()
if epoch == 1 or epoch % 10 == 0:
print('{} Epoch {}, Training loss {}'.format(
datetime.datetime.now(),
epoch,
loss_train / len(train_loader)))
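# Illustrative only (not part of the original source): a rough sketch of how
# training_loop might be wired up; the Net import path, the MNIST transform and
# the hyper-parameters are assumptions, not taken from this repository.
#
# import torch
# from torch.utils.data import DataLoader
# from torchvision import datasets, transforms
# from model.net import Net
#
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# train_set = datasets.MNIST('data/', train=True, download=True,
#                            transform=transforms.ToTensor())
# train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
# model = Net(n_channels=1).to(device)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# training_loop(n_epochs=10, optimizer=optimizer, model=model,
#               loss_fn=torch.nn.CrossEntropyLoss(),
#               train_loader=train_loader, device=device)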
``` |
{
"source": "JobHeroOne/jeroenhobo",
"score": 3
} |
#### File: JobHeroOne/jeroenhobo/application.py
```python
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/trips")
def trips():
return render_template("trips.html")
@app.route("/heartrate")
def heartrate():
return render_template("heartrate.html")
if __name__ == "__main__":
app.run()
``` |
{
"source": "jobic10/crmwithdjango",
"score": 2
} |
#### File: crmwithdjango/account/views.py
```python
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.forms import inlineformset_factory
from django.contrib.auth.models import Group
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate, login as LOGIN, logout as LOGOUT
from .forms import *
from .models import *
from .filters import *
from .decorators import *
@login_required
@admin_only
def home(request):
orders = Order.objects.all()
customers = Customer.objects.all()
total_customers = customers.count()
total_orders = orders.count()
delivered = orders.filter(status='Delivered').count()
pending = orders.filter(status='Pending').count()
context = {
'orders': orders,
'customers': customers,
'total_customers': total_customers,
'total_orders': total_orders,
'delivered': delivered,
'pending': pending
}
return render(request, "account/dashboard.html", context)
@login_required
@admin_only
def products(request):
products = Product.objects.all()
context = {'products': products}
return render(request, "account/products.html", context)
@login_required
@admin_only
def customer(request, customer_id):
customer = get_object_or_404(Customer, id=customer_id)
orders = customer.order_set.all()
filter = OrderFilter(request.GET, queryset=orders)
orders = filter.qs
context = {
'customer': customer,
'orders': orders,
'total_orders': orders.count(),
'filter': filter
}
return render(request, "account/customer.html", context)
@login_required
def order(request):
OrderFormSet = inlineformset_factory(
Customer, Order, fields=('product',), form=CustomerOrderForm)
customer = get_object_or_404(Customer, user=request.user)
# form = OrderForm(request.POST or None, initial={'customer': customer})
formset = OrderFormSet(request.POST or None,
instance=customer, queryset=Order.objects.none())
context = {'form': formset, 'title': 'Create Order'}
if request.method == 'POST':
if formset.is_valid():
formset.save()
messages.success(request, "Order created successfully!")
return redirect(reverse('userpage'))
else:
messages.error(request, "Invalid Form Submitted")
return render(request, 'account/form.html', context)
@login_required
@allowed_users(allowed_roles=['admin'])
def create_order(request, customer_id):
OrderFormSet = inlineformset_factory(
Customer, Order, fields=('product', 'status'))
customer = get_object_or_404(Customer, id=customer_id)
# form = OrderForm(request.POST or None, initial={'customer': customer})
formset = OrderFormSet(request.POST or None,
instance=customer, queryset=Order.objects.none())
context = {'form': formset, 'title': 'Create Order'}
if request.method == 'POST':
if formset.is_valid():
formset.save()
messages.success(request, "Order created successfully!")
return redirect(reverse('customer', args=[customer.id]))
else:
messages.error(request, "Invalid Form Submitted")
return render(request, 'account/form.html', context)
@login_required
def create_general_order(request):
form = OrderForm(request.POST or None)
context = {'form': form}
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, "Order created successfully!")
return redirect(reverse('create_general_order'))
else:
messages.error(request, "Invalid Form Submitted")
return render(request, 'account/form.html', context)
@login_required
@admin_only
def update_order(request, order_id):
instance = get_object_or_404(Order, id=order_id)
form = OrderForm(request.POST or None, instance=instance)
context = {'form': form}
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, "Order updated successfully!")
return redirect(reverse('update_order', args=[order_id]))
else:
messages.error(request, "Invalid Form Submitted")
return render(request, 'account/form.html', context)
@login_required
@admin_only
def delete_order(request, order_id):
item = get_object_or_404(Order, id=order_id)
context = {'item': item}
if request.method == 'POST':
item.delete()
messages.success(request, 'Order deleted!')
return redirect('/')
return render(request, 'account/delete.html', context)
@unauthenticated_user
def login(request):
if request.method == 'POST':
username = request.POST.get('username')
        password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is None:
messages.error(request, "Invalid credentials")
else:
LOGIN(request, user)
messages.success(request, "Welcome back!")
return redirect(reverse('home'))
return render(request, 'account/login.html')
@unauthenticated_user
def register(request):
if request.user.is_authenticated:
return redirect(reverse('home'))
form = CreateUserForm(request.POST or None)
context = {'form': form, 'title': 'Sign up '}
if request.method == 'POST':
if form.is_valid():
user = form.save()
messages.success(request, "You are now registered!")
return redirect(reverse('login'))
else:
messages.error(request, "Please fix form errors!")
return render(request, 'account/register.html', context)
def logout(request):
if request.user.is_authenticated:
LOGOUT(request)
messages.success(request, "Thanks for the time!")
else:
messages.error(request, 'You need to be signed in to sign out')
return redirect(reverse('login'))
@login_required
@allowed_users(allowed_roles=['customer'])
def userpage(request):
orders = request.user.customer.order_set.all()
total_orders = orders.count()
delivered = orders.filter(status='Delivered').count()
pending = orders.filter(status='Pending').count()
context = {
'orders': orders,
'total_orders': total_orders,
'delivered': delivered,
'pending': pending
}
return render(request, "account/user.html", context)
@login_required
@allowed_users(allowed_roles=['customer'])
def account_settings(request):
form = CustomerForm(request.POST or None,
request.FILES or None, instance=request.user.customer)
context = {'form': form}
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, "Profile Updated!")
return redirect(reverse('account'))
else:
messages.error(request, 'Form has error(s), please fix!')
return render(request, 'account/account_settings.html', context)
@login_required
@allowed_users(allowed_roles=['admin'])
def create_customer(request):
form = CreateUserForm(request.POST or None)
context = {'form': form, 'title': 'Create Customer'}
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, "New Customer added successfully!")
return redirect(reverse('create_customer'))
else:
messages.error(request, "Please fix form errors!")
return render(request, 'account/form.html', context)
@login_required
@allowed_users(allowed_roles=['admin'])
def create_product(request):
form = CreateProductForm(request.POST or None)
context = {'form': form, 'title': 'Create Product'}
if request.method == 'POST':
if form.is_valid():
form.save()
messages.success(request, "New Product added successfully!")
return redirect(reverse('create_product'))
else:
messages.error(request, "Please fix form errors!")
return render(request, 'account/form.html', context)
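# Illustrative only (not part of the original source): the decorators imported
# above come from .decorators; a common implementation of allowed_users checks
# the user's group, roughly like this hypothetical sketch (not this project's
# actual code).
#
# def allowed_users(allowed_roles=[]):
#     def decorator(view_func):
#         def wrapper(request, *args, **kwargs):
#             group = request.user.groups.first()
#             if group and group.name in allowed_roles:
#                 return view_func(request, *args, **kwargs)
#             return HttpResponse('You are not authorized to view this page')
#         return wrapper
#     return decorator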
``` |
{
"source": "jobic10/e-banking-django",
"score": 2
} |
#### File: e-banking-django/account/context_processors.py
```python
from .models import Customer
def this(request):
    # Assumes an authenticated user with an existing Customer record;
    # raises Customer.DoesNotExist otherwise.
    customer = Customer.objects.get(user=request.user)
    return {'this': customer}
```
#### File: e-banking-django/account/forms.py
```python
from django import forms
from django.forms.widgets import DateInput
from .models import *
from django.contrib.auth.forms import UserCreationForm
class FormSettings(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(FormSettings, self).__init__(*args, **kwargs)
        # Apply the Bootstrap 'form-control' class to every visible field:
for field in self.visible_fields():
field.field.widget.attrs['class'] = 'form-control'
class CustomerForm(FormSettings):
def __init__(self, *args, **kwargs):
super(CustomerForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.fields['balance'].disabled = True
class Meta:
model = Customer
fields = ('phone', 'date_of_birth', 'account_type',
'balance', )
widgets = {
'date_of_birth': DateInput(attrs={'type': 'date'})
}
class UserForm(UserCreationForm, FormSettings):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
for fieldname in ['password1', 'password2']:
self.fields[fieldname].help_text = None
class Meta:
model = User
fields = ('last_name', 'first_name', 'email',
'gender', 'profile_pic', 'address')
class UserEditForm(FormSettings):
class Meta:
model = User
fields = ('last_name', 'first_name', 'email',
'gender', 'profile_pic', 'address')
```
#### File: e-banking-django/account/models.py
```python
from django.contrib.auth.models import UserManager, AbstractUser
from django.db import models
from django.contrib.auth.hashers import make_password
class CustomUserManager(UserManager):
def _create_user(self, email, password, **extra_fields):
email = self.normalize_email(email)
user = User(email=email, **extra_fields)
user.password = make_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault("is_staff", False)
extra_fields.setdefault("is_superuser", False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password=None, **extra_fields):
extra_fields.setdefault("is_staff", True)
extra_fields.setdefault("user_type", 1)
extra_fields.setdefault("gender", 'M')
extra_fields.setdefault("is_superuser", True)
assert extra_fields["is_staff"]
assert extra_fields["is_superuser"]
return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
USER_TYPE = ((1, "Manager"), (2, "Staff"), (3, "Customer"))
GENDER = [("M", "Male"), ("F", "Female")]
last_name = models.CharField(max_length=30, null=False)
first_name = models.CharField(max_length=30, null=False)
username = None # Removed username, using email instead
email = models.EmailField(unique=True)
user_type = models.IntegerField(choices=USER_TYPE)
gender = models.CharField(max_length=1, choices=GENDER)
profile_pic = models.ImageField(null=False)
address = models.TextField()
updated_at = models.DateTimeField(auto_now=True)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.last_name + ", " + self.first_name
# Customers
class Customer(models.Model):
ACCOUNT_TYPE = [
('Savings', 'Savings'),
('Current', 'Current'),
]
user = models.OneToOneField(
User, on_delete=models.CASCADE, limit_choices_to={'user_type': 3})
account_type = models.CharField(max_length=8, choices=ACCOUNT_TYPE)
account_number = models.CharField(max_length=15)
pin = models.CharField(max_length=4)
date_of_birth = models.DateField()
balance = models.FloatField()
phone = models.CharField(max_length=11)
def __str__(self):
return str(self.user)
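# Illustrative only (not part of the original source): a custom user model like
# the one above has to be declared in settings, and the context processor in
# account/context_processors.py has to be registered; the app/module names are
# assumptions based on the file paths.
#
# # settings.py
# AUTH_USER_MODEL = 'account.User'
# TEMPLATES = [{
#     # ...
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'django.contrib.auth.context_processors.auth',
#             'account.context_processors.this',
#         ],
#     },
# }]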
``` |
{
"source": "jobin05/Python",
"score": 3
} |
#### File: jobin05/Python/TelnetSever.py
```python
import logging
import socket
import select
HOSTNAME = 'localhost'
PORT = '3001'
MAXIMUM_QUEUED_CONNECTIONS = 5
RECEIVING_BUFFER_SIZE = 4096
logger = logging.getLogger(__name__)
def start_server(hostname, port):
# Get all possible binding addresses for given hostname and port.
possible_addresses = socket.getaddrinfo(
hostname,
port
)
server_socket = None
# Look for an address that will actually bind.
for family, socket_type, protocol, name, address in possible_addresses:
try:
# Create socket.
server_socket = socket.socket(family, socket_type, protocol)
# Make socket port reusable.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind socket to the address.
server_socket.bind(address)
except OSError:
# Try another address.
continue
break
if server_socket is None:
logger.error("No suitable address available.")
return
# Listen for incoming connections.
server_socket.listen(MAXIMUM_QUEUED_CONNECTIONS)
logger.info("Listening on %s port %d." % server_socket.getsockname()[:2])
monitored_sockets = [server_socket]
try:
while True:
# Wait for any of the monitored sockets to become readable.
ready_to_read_sockets = select.select(
monitored_sockets,
tuple(),
tuple()
)[0]
for ready_socket in ready_to_read_sockets:
if ready_socket == server_socket:
# If server socket is readable, accept new client
# connection.
client_socket, client_address = server_socket.accept()
monitored_sockets.append(client_socket)
logger.info("New connection #%d on %s:%d." % (
client_socket.fileno(),
client_address[0],
client_address[1]
))
else:
message = ready_socket.recv(RECEIVING_BUFFER_SIZE)
if message:
                        # Client sent a message; print it (echoing it back is disabled).
                        # ready_socket.sendall(message)
                        print(message)
else:
# Client connection is lost. Handle it.
logger.info(
"Lost connection #%d." % ready_socket.fileno()
)
monitored_sockets.remove(ready_socket)
except KeyboardInterrupt:
pass
logger.info("Shutdown initiated.")
# Close client connections.
monitored_sockets.remove(server_socket)
for client_socket in monitored_sockets:
logger.info("Closing connection #%d." % client_socket.fileno())
client_socket.close()
# Close server socket.
logger.info("Shutting server down...")
server_socket.close()
if __name__ == '__main__':
# Configure logging.
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# Start server.
start_server(HOSTNAME, PORT)
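# Illustrative only (not part of the original source): any TCP client can feed
# this server, e.g. `telnet localhost 3001`, or the minimal socket client below.
#
# import socket
# with socket.create_connection(('localhost', 3001)) as client:
#     client.sendall(b'hello server\n')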
``` |
{
"source": "jobine/codelet",
"score": 3
} |
#### File: codelet/util/retryrequests.py
```python
import time
import requests
RETRY_COUNT = 3
RETRY_INTERVAL = 1000
class RetryRequests(object):
def __init__(self, count=RETRY_COUNT, interval=RETRY_INTERVAL):
'''
@param count: int, max retry count
        @param interval: int, retry interval in milliseconds
'''
self.count = count
self.interval = interval
def get(self, *args, **kwargs):
with requests.session() as session:
for n in range(self.count - 1, -1, -1):
try:
r = session.get(*args, **kwargs)
if not 100 <= r.status_code < 300:
raise Exception('status code: {}, message: {}'.format(r.status_code, r.content))
return r
except (Exception, requests.exceptions.RequestException) as e:
if n > 0:
time.sleep(self.interval * 0.001)
else:
raise e
def post(self, *args, **kwargs):
with requests.session() as session:
for n in range(self.count - 1, -1, -1):
try:
r = session.post(*args, **kwargs)
if not 100 <= r.status_code < 300:
raise Exception('status code: {}, message: {}'.format(r.status_code, r.content))
return r
except (Exception, requests.exceptions.RequestException) as e:
if n > 0:
time.sleep(self.interval * 0.001)
else:
raise e
def put(self, *args, **kwargs):
with requests.session() as session:
for n in range(self.count - 1, -1, -1):
try:
r = session.put(*args, **kwargs)
if not 100 <= r.status_code < 300:
raise Exception('status code: {}, message: {}'.format(r.status_code, r.content))
return r
except (Exception, requests.exceptions.RequestException) as e:
if n > 0:
time.sleep(self.interval * 0.001)
else:
raise e
def delete(self, *args, **kwargs):
with requests.session() as session:
for n in range(self.count - 1, -1, -1):
try:
r = session.delete(*args, **kwargs)
if not 100 <= r.status_code < 300:
raise Exception('status code: {}, message: {}'.format(r.status_code, r.content))
return r
except (Exception, requests.exceptions.RequestException) as e:
if n > 0:
time.sleep(self.interval * 0.001)
else:
raise e
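# Illustrative only (not part of the original source): typical usage, retrying a
# GET up to 3 times with a 500 ms pause between attempts (the URL is a placeholder).
#
# client = RetryRequests(count=3, interval=500)
# response = client.get('https://example.com/api/health', timeout=5)
# print(response.status_code)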
``` |
{
"source": "jobine/HGN",
"score": 2
} |
#### File: HGN/csr_mhqa/data_processing.py
```python
import gzip
import pickle
import json
import torch
import numpy as np
import os
from os.path import join
from tqdm import tqdm
from numpy.random import shuffle
from envs import DATASET_FOLDER
IGNORE_INDEX = -100
def get_cached_filename(f_type, config):
assert f_type in ['examples', 'features', 'graphs']
return f"cached_{f_type}_{config.model_type}_{config.max_seq_length}_{config.max_query_length}.pkl.gz"
class Example(object):
def __init__(self,
qas_id,
qas_type,
question_tokens,
doc_tokens,
sent_num,
sent_names,
sup_fact_id,
sup_para_id,
ques_entities_text,
ctx_entities_text,
para_start_end_position,
sent_start_end_position,
ques_entity_start_end_position,
ctx_entity_start_end_position,
question_text,
question_word_to_char_idx,
ctx_text,
ctx_word_to_char_idx,
edges=None,
orig_answer_text=None,
answer_in_ques_entity_ids=None,
answer_in_ctx_entity_ids=None,
answer_candidates_in_ctx_entity_ids=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.qas_type = qas_type
self.question_tokens = question_tokens
self.doc_tokens = doc_tokens
self.question_text = question_text
self.sent_num = sent_num
self.sent_names = sent_names
self.sup_fact_id = sup_fact_id
self.sup_para_id = sup_para_id
self.ques_entities_text = ques_entities_text
self.ctx_entities_text = ctx_entities_text
self.para_start_end_position = para_start_end_position
self.sent_start_end_position = sent_start_end_position
self.ques_entity_start_end_position = ques_entity_start_end_position
self.ctx_entity_start_end_position = ctx_entity_start_end_position
self.question_word_to_char_idx = question_word_to_char_idx
self.ctx_text = ctx_text
self.ctx_word_to_char_idx = ctx_word_to_char_idx
self.edges = edges
self.orig_answer_text = orig_answer_text
self.answer_in_ques_entity_ids = answer_in_ques_entity_ids
self.answer_in_ctx_entity_ids = answer_in_ctx_entity_ids
self.answer_candidates_in_ctx_entity_ids= answer_candidates_in_ctx_entity_ids
self.start_position = start_position
self.end_position = end_position
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
qas_id,
doc_tokens,
doc_input_ids,
doc_input_mask,
doc_segment_ids,
query_tokens,
query_input_ids,
query_input_mask,
query_segment_ids,
para_spans,
sent_spans,
entity_spans,
q_entity_cnt,
sup_fact_ids,
sup_para_ids,
ans_type,
token_to_orig_map,
edges=None,
orig_answer_text=None,
answer_in_entity_ids=None,
answer_candidates_ids=None,
start_position=None,
end_position=None):
self.qas_id = qas_id
self.doc_tokens = doc_tokens
self.doc_input_ids = doc_input_ids
self.doc_input_mask = doc_input_mask
self.doc_segment_ids = doc_segment_ids
self.query_tokens = query_tokens
self.query_input_ids = query_input_ids
self.query_input_mask = query_input_mask
self.query_segment_ids = query_segment_ids
self.para_spans = para_spans
self.sent_spans = sent_spans
self.entity_spans = entity_spans
self.q_entity_cnt = q_entity_cnt
self.sup_fact_ids = sup_fact_ids
self.sup_para_ids = sup_para_ids
self.ans_type = ans_type
self.edges = edges
self.token_to_orig_map = token_to_orig_map
self.orig_answer_text = orig_answer_text
self.answer_in_entity_ids = answer_in_entity_ids
self.answer_candidates_ids = answer_candidates_ids
self.start_position = start_position
self.end_position = end_position
class DataIteratorPack(object):
def __init__(self,
features, example_dict, graph_dict,
bsz, device,
para_limit, sent_limit, ent_limit, ans_ent_limit,
mask_edge_types,
sequential=False):
self.bsz = bsz
self.device = device
self.features = features
self.example_dict = example_dict
self.graph_dict = graph_dict
self.sequential = sequential
self.para_limit = para_limit
self.sent_limit = sent_limit
self.ent_limit = ent_limit
self.ans_ent_limit = ans_ent_limit
self.graph_nodes_num = 1 + para_limit + sent_limit + ent_limit
self.example_ptr = 0
self.mask_edge_types = mask_edge_types
self.max_seq_length = 512
if not sequential:
shuffle(self.features)
def refresh(self):
self.example_ptr = 0
if not self.sequential:
shuffle(self.features)
def empty(self):
return self.example_ptr >= len(self.features)
def __len__(self):
return int(np.ceil(len(self.features)/self.bsz))
def __iter__(self):
# BERT input
context_idxs = torch.LongTensor(self.bsz, self.max_seq_length)
context_mask = torch.LongTensor(self.bsz, self.max_seq_length)
segment_idxs = torch.LongTensor(self.bsz, self.max_seq_length)
# Mappings
if 'CUDA_VISIBLE_DEVICES' in os.environ and os.environ['CUDA_VISIBLE_DEVICES'] == '-1':
query_mapping = torch.Tensor(self.bsz, self.max_seq_length)
para_start_mapping = torch.Tensor(self.bsz, self.para_limit, self.max_seq_length)
para_end_mapping = torch.Tensor(self.bsz, self.para_limit, self.max_seq_length)
para_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.para_limit)
sent_start_mapping = torch.Tensor(self.bsz, self.sent_limit, self.max_seq_length)
sent_end_mapping = torch.Tensor(self.bsz, self.sent_limit, self.max_seq_length)
sent_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.sent_limit)
ent_start_mapping = torch.Tensor(self.bsz, self.ent_limit, self.max_seq_length)
ent_end_mapping = torch.Tensor(self.bsz, self.ent_limit, self.max_seq_length)
ent_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.ent_limit)
# Mask
para_mask = torch.FloatTensor(self.bsz, self.para_limit)
sent_mask = torch.FloatTensor(self.bsz, self.sent_limit)
ent_mask = torch.FloatTensor(self.bsz, self.ent_limit)
ans_cand_mask = torch.FloatTensor(self.bsz, self.ent_limit)
# Label tensor
y1 = torch.LongTensor(self.bsz)
y2 = torch.LongTensor(self.bsz)
q_type = torch.LongTensor(self.bsz)
is_support = torch.FloatTensor(self.bsz, self.sent_limit)
is_gold_para = torch.FloatTensor(self.bsz, self.para_limit)
is_gold_ent = torch.FloatTensor(self.bsz)
# Graph related
graphs = torch.Tensor(self.bsz, self.graph_nodes_num, self.graph_nodes_num)
else:
query_mapping = torch.Tensor(self.bsz, self.max_seq_length).cuda(self.device)
para_start_mapping = torch.Tensor(self.bsz, self.para_limit, self.max_seq_length).cuda(self.device)
para_end_mapping = torch.Tensor(self.bsz, self.para_limit, self.max_seq_length).cuda(self.device)
para_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.para_limit).cuda(self.device)
sent_start_mapping = torch.Tensor(self.bsz, self.sent_limit, self.max_seq_length).cuda(self.device)
sent_end_mapping = torch.Tensor(self.bsz, self.sent_limit, self.max_seq_length).cuda(self.device)
sent_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.sent_limit).cuda(self.device)
ent_start_mapping = torch.Tensor(self.bsz, self.ent_limit, self.max_seq_length).cuda(self.device)
ent_end_mapping = torch.Tensor(self.bsz, self.ent_limit, self.max_seq_length).cuda(self.device)
ent_mapping = torch.Tensor(self.bsz, self.max_seq_length, self.ent_limit).cuda(self.device)
# Mask
para_mask = torch.FloatTensor(self.bsz, self.para_limit).cuda(self.device)
sent_mask = torch.FloatTensor(self.bsz, self.sent_limit).cuda(self.device)
ent_mask = torch.FloatTensor(self.bsz, self.ent_limit).cuda(self.device)
ans_cand_mask = torch.FloatTensor(self.bsz, self.ent_limit).cuda(self.device)
# Label tensor
y1 = torch.LongTensor(self.bsz).cuda(self.device)
y2 = torch.LongTensor(self.bsz).cuda(self.device)
q_type = torch.LongTensor(self.bsz).cuda(self.device)
is_support = torch.FloatTensor(self.bsz, self.sent_limit).cuda(self.device)
is_gold_para = torch.FloatTensor(self.bsz, self.para_limit).cuda(self.device)
is_gold_ent = torch.FloatTensor(self.bsz).cuda(self.device)
# Graph related
graphs = torch.Tensor(self.bsz, self.graph_nodes_num, self.graph_nodes_num).cuda(self.device)
while True:
if self.example_ptr >= len(self.features):
break
start_id = self.example_ptr
cur_bsz = min(self.bsz, len(self.features) - start_id)
cur_batch = self.features[start_id: start_id + cur_bsz]
cur_batch.sort(key=lambda x: sum(x.doc_input_mask), reverse=True)
ids = []
for mapping in [para_mapping, para_start_mapping, para_end_mapping, \
sent_mapping, sent_start_mapping, sent_end_mapping, \
ent_mapping, ent_start_mapping, ent_end_mapping, \
ans_cand_mask,
query_mapping]:
mapping.zero_()
is_support.fill_(IGNORE_INDEX)
is_gold_para.fill_(IGNORE_INDEX)
is_gold_ent.fill_(IGNORE_INDEX)
for i in range(len(cur_batch)):
case = cur_batch[i]
context_idxs[i].copy_(torch.Tensor(case.doc_input_ids))
context_mask[i].copy_(torch.Tensor(case.doc_input_mask))
segment_idxs[i].copy_(torch.Tensor(case.doc_segment_ids))
if len(case.sent_spans) > 0:
for j in range(case.sent_spans[0][0] - 1):
query_mapping[i, j] = 1
for j, para_span in enumerate(case.para_spans[:self.para_limit]):
is_gold_flag = j in case.sup_para_ids
start, end, _ = para_span
if start <= end:
end = min(end, self.max_seq_length-1)
is_gold_para[i, j] = int(is_gold_flag)
para_mapping[i, start:end+1, j] = 1
para_start_mapping[i, j, start] = 1
para_end_mapping[i, j, end] = 1
for j, sent_span in enumerate(case.sent_spans[:self.sent_limit]):
is_sp_flag = j in case.sup_fact_ids
start, end = sent_span
if start <= end:
end = min(end, self.max_seq_length-1)
is_support[i, j] = int(is_sp_flag)
sent_mapping[i, start:end+1, j] = 1
sent_start_mapping[i, j, start] = 1
sent_end_mapping[i, j, end] = 1
for j, ent_span in enumerate(case.entity_spans[:self.ent_limit]):
start, end = ent_span
if start <= end:
end = min(end, self.max_seq_length-1)
ent_mapping[i, start:end+1, j] = 1
ent_start_mapping[i, j, start] = 1
ent_end_mapping[i, j, end] = 1
ans_cand_mask[i, j] = int(j in case.answer_candidates_ids)
is_gold_ent[i] = case.answer_in_entity_ids[0] if len(case.answer_in_entity_ids) > 0 else IGNORE_INDEX
if case.ans_type == 0 or case.ans_type == 3:
if len(case.end_position) == 0:
y1[i] = y2[i] = 0
elif case.end_position[0] < self.max_seq_length and context_mask[i][case.end_position[0]+1] == 1: # "[SEP]" is the last token
y1[i] = case.start_position[0]
y2[i] = case.end_position[0]
else:
y1[i] = y2[i] = 0
q_type[i] = case.ans_type if is_gold_ent[i] > 0 else 0
elif case.ans_type == 1:
y1[i] = IGNORE_INDEX
y2[i] = IGNORE_INDEX
q_type[i] = 1
elif case.ans_type == 2:
y1[i] = IGNORE_INDEX
y2[i] = IGNORE_INDEX
q_type[i] = 2
# ignore entity loss if there is no entity
if case.ans_type != 3:
is_gold_ent[i].fill_(IGNORE_INDEX)
tmp_graph = self.graph_dict[case.qas_id]
graph_adj = torch.from_numpy(tmp_graph['adj']).to(self.device)
for k in range(graph_adj.size(0)):
graph_adj[k, k] = 8
for edge_type in self.mask_edge_types:
graph_adj = torch.where(graph_adj == edge_type, torch.zeros_like(graph_adj), graph_adj)
graphs[i] = graph_adj
ids.append(case.qas_id)
input_lengths = (context_mask[:cur_bsz] > 0).long().sum(dim=1)
max_c_len = int(input_lengths.max())
para_mask = (para_mapping > 0).any(1).float()
sent_mask = (sent_mapping > 0).any(1).float()
ent_mask = (ent_mapping > 0).any(1).float()
self.example_ptr += cur_bsz
yield {
'context_idxs': context_idxs[:cur_bsz, :max_c_len].contiguous().to(self.device),
'context_mask': context_mask[:cur_bsz, :max_c_len].contiguous().to(self.device),
'segment_idxs': segment_idxs[:cur_bsz, :max_c_len].contiguous().to(self.device),
'context_lens': input_lengths.contiguous().to(self.device),
'y1': y1[:cur_bsz],
'y2': y2[:cur_bsz],
'ids': ids,
'q_type': q_type[:cur_bsz],
'is_support': is_support[:cur_bsz, :].contiguous(),
'is_gold_para': is_gold_para[:cur_bsz, :].contiguous(),
'is_gold_ent': is_gold_ent[:cur_bsz].contiguous(),
'query_mapping': query_mapping[:cur_bsz, :max_c_len].contiguous(),
'para_mapping': para_mapping[:cur_bsz, :max_c_len, :],
'para_start_mapping': para_start_mapping[:cur_bsz, :, :max_c_len],
'para_end_mapping': para_end_mapping[:cur_bsz, :, :max_c_len],
'para_mask': para_mask[:cur_bsz, :],
'sent_mapping': sent_mapping[:cur_bsz, :max_c_len, :],
'sent_start_mapping': sent_start_mapping[:cur_bsz, :, :max_c_len],
'sent_end_mapping': sent_end_mapping[:cur_bsz, :, :max_c_len],
'sent_mask': sent_mask[:cur_bsz, :],
'ent_mapping': ent_mapping[:cur_bsz, :max_c_len, :],
'ent_start_mapping': ent_start_mapping[:cur_bsz, :, :max_c_len],
'ent_end_mapping': ent_end_mapping[:cur_bsz, :, :max_c_len],
'ent_mask': ent_mask[:cur_bsz, :],
'ans_cand_mask': ans_cand_mask[:cur_bsz, :],
'graphs': graphs[:cur_bsz, :, :]
}
class DataHelper:
def __init__(self, gz=True, config=None):
self.DataIterator = DataIteratorPack
self.gz = gz
self.suffix = '.pkl.gz' if gz else '.pkl'
self.data_dir = join(DATASET_FOLDER, 'data_feat')
self.__train_features__ = None
self.__dev_features__ = None
self.__train_examples__ = None
self.__dev_examples__ = None
self.__train_graphs__ = None
self.__dev_graphs__ = None
self.__train_example_dict__ = None
self.__dev_example_dict__ = None
self.config = config
def get_feature_file(self, tag):
cached_filename = get_cached_filename('features', self.config)
return join(self.data_dir, tag, cached_filename)
def get_example_file(self, tag):
cached_filename = get_cached_filename('examples', self.config)
return join(self.data_dir, tag, cached_filename)
def get_graph_file(self, tag):
cached_filename = get_cached_filename('graphs', self.config)
return join(self.data_dir, tag, cached_filename)
@property
def train_feature_file(self):
return self.get_feature_file('train')
@property
def dev_feature_file(self):
return self.get_feature_file('dev_distractor')
@property
def train_example_file(self):
return self.get_example_file('train')
@property
def dev_example_file(self):
return self.get_example_file('dev_distractor')
@property
def train_graph_file(self):
return self.get_graph_file('train')
@property
def dev_graph_file(self):
return self.get_graph_file('dev_distractor')
def get_pickle_file(self, file_name):
if self.gz:
return gzip.open(file_name, 'rb')
else:
return open(file_name, 'rb')
def __get_or_load__(self, name, file):
if getattr(self, name) is None:
with self.get_pickle_file(file) as fin:
print('loading', file)
setattr(self, name, pickle.load(fin))
return getattr(self, name)
# Features
@property
def train_features(self):
return self.__get_or_load__('__train_features__', self.train_feature_file)
@property
def dev_features(self):
return self.__get_or_load__('__dev_features__', self.dev_feature_file)
# Examples
@property
def train_examples(self):
return self.__get_or_load__('__train_examples__', self.train_example_file)
@property
def dev_examples(self):
return self.__get_or_load__('__dev_examples__', self.dev_example_file)
# Graphs
@property
def train_graphs(self):
return self.__get_or_load__('__train_graphs__', self.train_graph_file)
@property
def dev_graphs(self):
return self.__get_or_load__('__dev_graphs__', self.dev_graph_file)
# Example dict
@property
def train_example_dict(self):
if self.__train_example_dict__ is None:
self.__train_example_dict__ = {e.qas_id: e for e in self.train_examples}
return self.__train_example_dict__
@property
def dev_example_dict(self):
if self.__dev_example_dict__ is None:
self.__dev_example_dict__ = {e.qas_id: e for e in self.dev_examples}
return self.__dev_example_dict__
# Feature dict
@property
def train_feature_dict(self):
return {e.qas_id: e for e in self.train_features}
@property
def dev_feature_dict(self):
return {e.qas_id: e for e in self.dev_features}
# Load
def load_dev(self):
return self.dev_features, self.dev_example_dict, self.dev_graphs
def load_train(self):
return self.train_features, self.train_example_dict, self.train_graphs
@property
def dev_loader(self):
return self.DataIterator(*self.load_dev(),
bsz=self.config.eval_batch_size,
device=self.config.device,
para_limit=self.config.max_para_num,
sent_limit=self.config.max_sent_num,
ent_limit=self.config.max_entity_num,
ans_ent_limit=self.config.max_ans_ent_num,
mask_edge_types=self.config.mask_edge_types,
sequential=True)
@property
def train_loader(self):
return self.DataIterator(*self.load_train(),
bsz=self.config.batch_size,
device=self.config.device,
para_limit=self.config.max_para_num,
sent_limit=self.config.max_sent_num,
ent_limit=self.config.max_entity_num,
ans_ent_limit=self.config.max_ans_ent_num,
mask_edge_types=self.config.mask_edge_types,
sequential=False)
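# Example usage (a sketch; assumes a parsed `config` exposing the fields read above,
# e.g. batch_size, device and the various limits):
#   helper = DataHelper(gz=True, config=config)
#   for batch in helper.train_loader:
#       pass  # each batch is the dict yielded by DataIteratorPack above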
```
#### File: HGN/csr_mhqa/utils.py
```python
import pickle
import torch
import json
import numpy as np
import string
import re
import os
import shutil
import collections
import logging
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from model_envs import MODEL_CLASSES, ALL_MODELS
# from transformers.tokenization_bert import whitespace_tokenize, BasicTokenizer, BertTokenizer
from transformers import BasicTokenizer, BertTokenizer
from transformers.models.bert.tokenization_bert import whitespace_tokenize
from transformers import AdamW
from eval.hotpot_evaluate_v1 import normalize_answer, eval as hotpot_eval
from csr_mhqa.data_processing import IGNORE_INDEX
logger = logging.getLogger(__name__)
def load_encoder_model(encoder_name_or_path, model_type):
if encoder_name_or_path in [None, 'None', 'none']:
raise ValueError('no checkpoint provided for model!')
config_class, model_encoder, tokenizer_class = MODEL_CLASSES[model_type]
config = config_class.from_pretrained(encoder_name_or_path)
if config is None:
raise ValueError(f'config.json is not found at {encoder_name_or_path}')
# check if is a path
if os.path.exists(encoder_name_or_path):
if os.path.isfile(os.path.join(encoder_name_or_path, 'pytorch_model.bin')):
encoder_file = os.path.join(encoder_name_or_path, 'pytorch_model.bin')
else:
encoder_file = os.path.join(encoder_name_or_path, 'encoder.pkl')
encoder = model_encoder.from_pretrained(encoder_file, config=config)
else:
encoder = model_encoder.from_pretrained(encoder_name_or_path, config=config)
return encoder, config
def get_optimizer(encoder, model, args, learning_rate, remove_pooler=False):
    """
    Build an AdamW optimizer over the encoder and downstream model parameters.
    :param encoder: pretrained encoder whose parameters are fine-tuned
    :param model: downstream model trained jointly with the encoder
    :param args: namespace providing weight_decay and adam_epsilon
    :param learning_rate: learning rate passed to AdamW
    :param remove_pooler: if True, exclude pooler parameters from optimization
    :return: configured AdamW optimizer
    """
param_optimizer = list(encoder.named_parameters())
param_optimizer += list(model.named_parameters())
if remove_pooler:
param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=args.adam_epsilon)
return optimizer
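# Example call (a sketch; assumes the training `args` namespace provides the
# fields referenced above, e.g. args.weight_decay and args.adam_epsilon):
#   optimizer = get_optimizer(encoder, model, args, learning_rate=1e-5)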
def compute_loss(args, batch, start, end, para, sent, ent, q_type):
criterion = nn.CrossEntropyLoss(reduction='mean', ignore_index=IGNORE_INDEX)
binary_criterion = nn.BCEWithLogitsLoss(reduction='mean')
loss_span = args.ans_lambda * (criterion(start, batch['y1']) + criterion(end, batch['y2']))
loss_type = args.type_lambda * criterion(q_type, batch['q_type'])
sent_pred = sent.view(-1, 2)
sent_gold = batch['is_support'].long().view(-1)
loss_sup = args.sent_lambda * criterion(sent_pred, sent_gold.long())
loss_ent = args.ent_lambda * criterion(ent, batch['is_gold_ent'].long())
loss_para = args.para_lambda * criterion(para.view(-1, 2), batch['is_gold_para'].long().view(-1))
loss = loss_span + loss_type + loss_sup + loss_ent + loss_para
return loss, loss_span, loss_type, loss_sup, loss_ent, loss_para
def eval_model(args, encoder, model, dataloader, example_dict, feature_dict, prediction_file, eval_file, dev_gold_file):
encoder.eval()
model.eval()
answer_dict = {}
answer_type_dict = {}
answer_type_prob_dict = {}
dataloader.refresh()
thresholds = np.arange(0.1, 1.0, 0.05)
N_thresh = len(thresholds)
total_sp_dict = [{} for _ in range(N_thresh)]
for batch in tqdm(dataloader):
with torch.no_grad():
inputs = {'input_ids': batch['context_idxs'],
'attention_mask': batch['context_mask'],
'token_type_ids': batch['segment_idxs'] if args.model_type in ['bert', 'xlnet'] else None} # XLM don't use segment_ids
outputs = encoder(**inputs)
batch['context_encoding'] = outputs[0]
batch['context_mask'] = batch['context_mask'].float().to(args.device)
start, end, q_type, paras, sent, ent, yp1, yp2 = model(batch, return_yp=True)
type_prob = F.softmax(q_type, dim=1).data.cpu().numpy()
answer_dict_, answer_type_dict_, answer_type_prob_dict_ = convert_to_tokens(example_dict, feature_dict, batch['ids'],
yp1.data.cpu().numpy().tolist(),
yp2.data.cpu().numpy().tolist(),
type_prob)
answer_type_dict.update(answer_type_dict_)
answer_type_prob_dict.update(answer_type_prob_dict_)
answer_dict.update(answer_dict_)
predict_support_np = torch.sigmoid(sent[:, :, 1]).data.cpu().numpy()
for i in range(predict_support_np.shape[0]):
cur_sp_pred = [[] for _ in range(N_thresh)]
cur_id = batch['ids'][i]
for j in range(predict_support_np.shape[1]):
if j >= len(example_dict[cur_id].sent_names):
break
for thresh_i in range(N_thresh):
if predict_support_np[i, j] > thresholds[thresh_i]:
cur_sp_pred[thresh_i].append(example_dict[cur_id].sent_names[j])
for thresh_i in range(N_thresh):
if cur_id not in total_sp_dict[thresh_i]:
total_sp_dict[thresh_i][cur_id] = []
total_sp_dict[thresh_i][cur_id].extend(cur_sp_pred[thresh_i])
def choose_best_threshold(ans_dict, pred_file):
best_joint_f1 = 0
best_metrics = None
best_threshold = 0
for thresh_i in range(N_thresh):
prediction = {'answer': ans_dict,
'sp': total_sp_dict[thresh_i],
'type': answer_type_dict,
'type_prob': answer_type_prob_dict}
tmp_file = os.path.join(os.path.dirname(pred_file), 'tmp.json')
with open(tmp_file, 'w') as f:
json.dump(prediction, f)
metrics = hotpot_eval(tmp_file, dev_gold_file)
if metrics['joint_f1'] >= best_joint_f1:
best_joint_f1 = metrics['joint_f1']
best_threshold = thresholds[thresh_i]
best_metrics = metrics
shutil.move(tmp_file, pred_file)
return best_metrics, best_threshold
best_metrics, best_threshold = choose_best_threshold(answer_dict, prediction_file)
json.dump(best_metrics, open(eval_file, 'w'))
return best_metrics, best_threshold
def get_weights(size, gain=1.414):
weights = nn.Parameter(torch.zeros(size=size))
nn.init.xavier_uniform_(weights, gain=gain)
return weights
def get_bias(size):
bias = nn.Parameter(torch.zeros(size=size))
return bias
def get_act(act):
if act.startswith('lrelu'):
return nn.LeakyReLU(float(act.split(':')[1]))
elif act == 'relu':
return nn.ReLU()
else:
raise NotImplementedError
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = <NAME>
# orig_text = <NAME>
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "<NAME>".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
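    # Illustrative walk-through (hypothetical strings, not taken from the repo):
    # with do_lower_case=True, pred_text "jane doe" and orig_text "Jane Doe's",
    # the code below strips the spaces from both sides, checks that the stripped
    # strings have equal length, and then maps the span back to "Jane Doe".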
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
print("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
print("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
print("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def convert_to_tokens(examples, features, ids, y1, y2, q_type_prob):
answer_dict, answer_type_dict = {}, {}
answer_type_prob_dict = {}
q_type = np.argmax(q_type_prob, 1)
def get_ans_from_pos(qid, y1, y2):
feature = features[qid]
example = examples[qid]
tok_to_orig_map = feature.token_to_orig_map
orig_all_tokens = example.question_tokens + example.doc_tokens
final_text = " "
if y1 < len(tok_to_orig_map) and y2 < len(tok_to_orig_map):
orig_tok_start = tok_to_orig_map[y1]
orig_tok_end = tok_to_orig_map[y2]
ques_tok_len = len(example.question_tokens)
if orig_tok_start < ques_tok_len and orig_tok_end < ques_tok_len:
ques_start_idx = example.question_word_to_char_idx[orig_tok_start]
ques_end_idx = example.question_word_to_char_idx[orig_tok_end] + len(example.question_tokens[orig_tok_end])
final_text = example.question_text[ques_start_idx:ques_end_idx]
            else:
                orig_tok_start -= len(example.question_tokens)
                orig_tok_end -= len(example.question_tokens)
                ctx_start_idx = example.ctx_word_to_char_idx[orig_tok_start]
                ctx_end_idx = example.ctx_word_to_char_idx[orig_tok_end] + len(example.doc_tokens[orig_tok_end])
                final_text = example.ctx_text[ctx_start_idx:ctx_end_idx]
return final_text
for i, qid in enumerate(ids):
feature = features[qid]
answer_text = ''
if q_type[i] in [0, 3]:
answer_text = get_ans_from_pos(qid, y1[i], y2[i])
elif q_type[i] == 1:
answer_text = 'yes'
elif q_type[i] == 2:
answer_text = 'no'
else:
raise ValueError("question type error")
answer_dict[qid] = answer_text
answer_type_prob_dict[qid] = q_type_prob[i].tolist()
answer_type_dict[qid] = q_type[i].item()
return answer_dict, answer_type_dict, answer_type_prob_dict
def count_parameters(model, trainable_only=True, is_dict=False):
"""
Count number of parameters in a model or state dictionary
:param model:
:param trainable_only:
:param is_dict:
:return:
"""
if is_dict:
return sum(np.prod(list(model[k].size())) for k in model)
if trainable_only:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
else:
return sum(p.numel() for p in model.parameters())
``` |
{
"source": "jobine/smartAI-plugin",
"score": 2
} |
#### File: smartAI-plugin/common/plugin_model_api.py
```python
import time
from telemetry import log
from flask import Flask, request, g, jsonify, make_response
from flask_restful import Resource, Api
from common.plugin_service import PluginService
from common.util.constant import STATUS_SUCCESS, STATUS_FAIL
import logging
app = Flask(__name__)
api = Api(app)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def try_except(fn):
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
log.error("-----Exception-----")
return make_response(jsonify(dict(result=STATUS_FAIL, message='Unknown error, please check your request. ' + str(e))), 502)
return wrapped
@app.route('/', methods=['GET'])
def index():
return "Welcome to TSANA Computing Platform"
@app.before_request
def before_request():
g.start = time.time()
@app.after_request
def after_request(response):
total_time = (time.time() - g.start) * 1e6
rule = str(request.url_rule)
status = str(response.status_code)
# TODO log here
request_log = '\nRequest begin-----------------------------'
request_log += '\n'
request_log += ' url: ' + str(request.url)
request_log += '\n'
request_log += ' body: ' + str(request.data)
request_log += '\n'
request_log += ' response status: ' + str(response.status)
request_log += '\n'
request_log += ' response data: ' + str(response.data)
request_log += '\n'
request_log += 'Request end-----------------------------'
log.info(request_log)
return response
class PluginModelIndexAPI(Resource):
def put(self):
return "Welcome to TSANA Computing Platform"
api.add_resource(PluginModelIndexAPI, '/')
class PluginModelAPI(Resource): # The API class that handles a single user
def __init__(self, plugin_service: PluginService):
self.__plugin_service = plugin_service
@try_except
def get(self, model_id):
return self.__plugin_service.state(request, model_id)
@try_except
def post(self, model_id):
pass
@try_except
def put(self, model_id):
pass
@try_except
def delete(self, model_id):
return self.__plugin_service.delete(request, model_id)
class PluginModelTrainAPI(Resource):
def __init__(self, plugin_service: PluginService):
self.__plugin_service = plugin_service
@try_except
def post(self):
return self.__plugin_service.train(request)
class PluginModelInferenceAPI(Resource):
def __init__(self, plugin_service: PluginService):
self.__plugin_service = plugin_service
@try_except
def post(self, model_id):
return self.__plugin_service.inference(request, model_id)
class PluginModelParameterAPI(Resource):
def __init__(self, plugin_service: PluginService):
self.__plugin_service = plugin_service
@try_except
def post(self):
return self.__plugin_service.verify(request)
class PluginModelListAPI(Resource):
def __init__(self, plugin_service: PluginService):
self.__plugin_service = plugin_service
@try_except
def get(self):
return self.__plugin_service.list_models(request)
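# Example registration (a sketch; the real wiring lives in the service bootstrap,
# and `my_plugin_service` is a placeholder PluginService instance):
#   api.add_resource(PluginModelTrainAPI, '/models/train',
#                    resource_class_kwargs={'plugin_service': my_plugin_service})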
```
#### File: common/util/timeutil.py
```python
from dateutil import parser, tz
import datetime
import dateutil.relativedelta
from .constant import MINT_IN_SECONDS, HOUR_IN_SECONDS, DAY_IN_SECONDS
from .gran import Gran
DT_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
DT_FILENAME_FORMAT = '%Y-%m-%dT_%H_%M_%SZ'
def convert_freq(gran, custom_in_seconds):
if gran == Gran.Yearly:
return 'Y'
if gran == Gran.Monthly:
return 'M'
if gran == Gran.Weekly:
return 'W'
if gran == Gran.Daily:
return 'D'
if gran == Gran.Hourly:
return 'H'
if gran == Gran.Minutely:
return 'T'
if gran == Gran.Secondly:
return 'S'
return '{}S'.format(custom_in_seconds)
def str_to_dt(s):
return parser.parse(s).replace(tzinfo=tz.UTC)
def dt_to_str(dt):
return dt.strftime(DT_FORMAT)
def dt_to_str_file_name(dt):
return dt.strftime(DT_FILENAME_FORMAT)
def get_diff(start, graninfo, end):
(gran_str, custom_in_seconds) = graninfo
delta = dateutil.relativedelta.relativedelta(end, start)
if gran_str == 'Daily':
diff = (end - start).total_seconds() / DAY_IN_SECONDS
elif gran_str == 'Weekly':
diff = (end - start).total_seconds() / (DAY_IN_SECONDS * 7)
elif gran_str == 'Monthly':
diff = delta.years * 12 + delta.months
elif gran_str == 'Yearly':
diff = delta.years
elif gran_str == 'Hourly':
diff = (end - start).total_seconds() / HOUR_IN_SECONDS
elif gran_str == 'Minutely':
diff = (end - start).total_seconds() / MINT_IN_SECONDS
elif gran_str == 'Secondly':
diff = (end - start).total_seconds()
elif gran_str == 'Custom':
diff = (end - start).total_seconds() / custom_in_seconds
else:
raise Exception('Granularity not supported: {}|{}'.format(*graninfo))
return int(diff)
def get_time_offset(timestamp, graninfo, offset):
(gran_str, custom_in_seconds) = graninfo
if gran_str == 'Daily':
return timestamp + datetime.timedelta(days=offset)
elif gran_str == 'Weekly':
return timestamp + datetime.timedelta(weeks=offset)
elif gran_str == 'Monthly':
return timestamp + dateutil.relativedelta.relativedelta(months=offset)
elif gran_str == 'Yearly':
return timestamp + dateutil.relativedelta.relativedelta(years=offset)
elif gran_str == 'Hourly':
return timestamp + datetime.timedelta(hours=offset)
elif gran_str == 'Minutely':
return timestamp + datetime.timedelta(minutes=offset)
elif gran_str == 'Secondly':
return timestamp + datetime.timedelta(seconds=offset)
elif gran_str == 'Custom':
return timestamp + datetime.timedelta(seconds=custom_in_seconds * offset)
else:
raise Exception('Granularity not supported: {}|{}'.format(*graninfo))
def get_time_list(start_time, end_time, graninfo):
time_list = []
(gran_str, custom_in_seconds) = graninfo
offset = 1
if gran_str == 'Daily':
timedelta = datetime.timedelta(days=offset)
elif gran_str == 'Weekly':
timedelta = datetime.timedelta(weeks=offset)
elif gran_str == 'Monthly':
timedelta = dateutil.relativedelta.relativedelta(months=offset)
elif gran_str == 'Yearly':
timedelta = dateutil.relativedelta.relativedelta(years=offset)
elif gran_str == 'Hourly':
timedelta = datetime.timedelta(hours=offset)
elif gran_str == 'Minutely':
timedelta = datetime.timedelta(minutes=offset)
elif gran_str == 'Secondly':
timedelta = datetime.timedelta(seconds=offset)
elif gran_str == 'Custom':
timedelta = datetime.timedelta(seconds=custom_in_seconds * offset)
else:
raise Exception('Granularity not supported: {}|{}'.format(*graninfo))
while start_time < end_time:
time_list.append(start_time)
start_time = start_time + timedelta
return time_list
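# Example (a sketch; the graninfo tuple shape follows the functions above):
#   start = str_to_dt('2020-01-01T00:00:00Z')
#   end = str_to_dt('2020-01-01T06:00:00Z')
#   get_diff(start, ('Hourly', None), end)         # -> 6
#   get_time_list(start, end, ('Hourly', None))    # six hourly timestamps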
```
#### File: forecast/util/univariate_forecast_item.py
```python
from common.util.constant import TIMESTAMP
FORECAST_VALUE = 'forecastValue'
CONFIDENCE = 'confidence'
UPPER_BOUNDARY = 'upperBoundary'
LOWER_BOUNDARY = 'lowerBoundary'
class UnivariateForecastItem:
def __init__(self, forecast_value, lower_boundary, upper_boundary, confidence, timestamp):
self.forecast_value = float(forecast_value)
self.confidence = float(confidence)
self.upper_boundary = float(upper_boundary)
self.lower_boundary = float(lower_boundary)
self.timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
def to_dict(self):
return {FORECAST_VALUE: self.forecast_value, CONFIDENCE: self.confidence,
UPPER_BOUNDARY: self.upper_boundary, LOWER_BOUNDARY: self.lower_boundary, TIMESTAMP: self.timestamp}
``` |
{
"source": "JobinH0od/C-O-D-E-S",
"score": 4
} |
#### File: Python/OtherProjects/aboutMePrompt.py
```python
def allQuestions():
print("What's your name?")
name = input()
print("Hello", name, "!")
print("What country do you live in?")
country = input()
print("I love", country, "!")
print("How old are you?")
    age = int(input())  # compare as a number, not as a string
    if age >= 18:
        print("I hope you've already gone to a bar!")
    elif age < 18:
        print("I hope you didn't go to any bar yet!")
allQuestions()
input()
```
#### File: Python/OtherProjects/emailDefault.py
```python
import datetime
import smtplib
def emailDefault():
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("<EMAIL>","Iact1v1ty")
server.sendmail("<EMAIL>", "<EMAIL>", "TEST")
    server.quit()
emailDefault()
```
#### File: Python/OtherProjects/PigLatin.py
```python
import time
def PygLatin():
pyg = "ay"
print("Welcome to my Pig Latin program!")
original = input("Enter a simple word : ")
    if len(original) > 0 and original.isalpha():
        print(original)
        word = original.lower()
        first = original[0]
        newWord = word + first + pyg
        newWord = newWord[1:]
        print(newWord)
    else:
        print("Enter a valid word please")
print()
PygLatin()
time.sleep(4)
```
#### File: Python/OtherProjects/sendAtTime.py
```python
import datetime
import smtplib
import time
def emailDefault():
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login("<EMAIL>","Iact1v1ty")
    server.sendmail("<EMAIL>", "<EMAIL>", "It's 2:45 o' clock!")
    server.quit()
while True:
    now = str(datetime.datetime.utcnow().strftime("%H:%M"))
    if now == "19:45":
        print("Sending...")
        emailDefault()
        print("Stopping...")
        quit()
    else:
        time.sleep(30)  # check again shortly instead of busy-waiting
``` |
{
"source": "jobini/tic_tac_toe",
"score": 4
} |
#### File: jobini/tic_tac_toe/classes.py
```python
from random import sample
class Grid:
played_pos = set()
def __init__(self):
self.cells = [" " for i in range(0,9)]
self.cell_states = [0 for i in range(0,9)]
self.empty_win = False
def display(self):
for i in range(0,7,3):
print " __ __ __"
print "|{0} |{1} |{2} |\n".format(self.cells[i],self.cells[i+1],self.cells[i+2])
def win(self):
if (self.row_win()[0] or self.column_win()[0] or self.diagonal_win()[0]) and (not self.empty_win):
return True
else:
return False
def row_win(self):
for i in range(0,7,3):
if ((self.cells[i] == self.cells[i+1]) and (self.cells[i+1] == self.cells[i+2])):
if self.cells[i] == " ":
self.empty_win = True
else:
self.empty_win = False
return True,self.cell_states[i]
return False,None
def column_win(self):
for i in range(0,3):
if ((self.cells[i] == self.cells[i+3]) and (self.cells[i+3] == self.cells[i+6])):
if self.cells[i] == " ":
self.empty_win = True
else:
self.empty_win = False
return True,self.cell_states[i]
return False,None
def diagonal_win(self):
if ((self.cells[0] == self.cells[4]) and (self.cells[4] == self.cells[8])):
if self.cells[0] == " ":
self.empty_win = True
else:
self.empty_win = False
return True,self.cell_states[0]
if ((self.cells[2] == self.cells[4]) and (self.cells[4] == self.cells[6])):
if self.cells[2] == " ":
self.empty_win = True
else:
self.empty_win = False
return True,self.cell_states[2]
return False,None
def draw(self):
if (not self.win() and 0 not in self.cell_states):
return True
else:
return False
def is_valid(self,pos):
try:
pos = int(pos)
except:
print "Invalid entry! Try again."
return False
if pos > 0 and pos < 10:
if pos not in Grid.played_pos:
return True
else:
print "Position already played! Try again."
return False
else:
print "Positions must be from 1-9! Try again."
return False
class Player:
p_num = 1
def __init__(self,token,name):
self.token = token
self.name = name
self.number = Player.p_num
Player.p_num += 1
def play(self,pos,g):
Grid.played_pos.add(int(pos))
g.cells[int(pos)-1] = self.token
g.cell_states[int(pos)-1] = self.number
def ask_action(self):
pos = raw_input("{0}'s turn (Player {1}): ".format(self.name,self.number))
return pos
class Computer(Player):
def ask_action(self):
pos = sample({i for i in range(1,10)}.difference(Grid.played_pos),1)[0]
print "{0}'s turn: {1}".format(self.name,pos)
return pos
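# Example round (a sketch of how these classes fit together):
#   g = Grid()
#   cpu = Computer("O", "Computer")
#   pos = cpu.ask_action()
#   if g.is_valid(pos):
#       cpu.play(pos, g)
#   g.display()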
``` |
{
"source": "jobino/carla_control_joystick",
"score": 3
} |
#### File: modules/joystick/ControlSystem.py
```python
from modules.joystick import SystemController
from modules.button import button
from modules.joystick import PrintText
from modules.client import Connection
import pygame
import random
import os
import sys
import subprocess
SystemController = SystemController
TextOutput = PrintText.TextPrint()
os.environ["SDL_VIDEO_CENTERED"] = '1'
pygame.init()
RED = (255,0,0)
BLUE = (0,0,255)
GREEN = (0,255,0)
BLACK = (0,0,0)
WHITE = (255,255,255)
ORANGE = (255,180,0)
joystick = []
i = 0
#The button can be styled in a manner similar to CSS.
BUTTON_STYLE = {"hover_color" : BLUE
, "clicked_color" : GREEN
, "clicked_font_color" : BLACK
, "hover_font_color" : ORANGE
, "hover_sound" : pygame.mixer.Sound(os.getcwd()+"/sound.wav")
}
clock = pygame.time.Clock()
class Control(object):
def __init__(self):
self.screen = pygame.display.set_mode((500,500))
self.screen_rect = self.screen.get_rect()
self.clock = pygame.time.Clock()
self.done = False
self.fps = 60.0
self.color = WHITE
self.startClient = False
message = "Start Carla Server"
self.buttonStartCarlaServer = button.Button((0,0,110,25),RED, ServerController().start_server_carla,text=message, **BUTTON_STYLE)
self.buttonStartCarlaServer.rect.center = (70, self.screen_rect.height - 50)
self.buttonStartJoystick = button.Button((0,0,110,25),RED, self.start_joystick,text="Start Joystick", **BUTTON_STYLE)
self.buttonStartJoystick.rect.center = (190, self.screen_rect.height - 50)
self.buttonStartClient = button.Button((0,0,110,25),RED, self.connect_client,text="Start Client", **BUTTON_STYLE)
self.buttonStartClient.rect.center = (310, self.screen_rect.height - 50)
self.buttonAutoPilot = button.Button((0,0,110,25),RED, self.autopilot,text="Auto Pilot", **BUTTON_STYLE)
self.buttonAutoPilot.rect.center = (430, self.screen_rect.height - 50)
def connect_client(self):
Connection.ControlClient().start()
self.startClient = True
def autopilot(self):
if self.startClient:
Connection.ControlClient().set_autopilot()
def event_loop(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True
SystemController.event_buttons_pressed(event)
self.buttonStartCarlaServer.check_event(event)
self.buttonStartJoystick.check_event(event)
self.buttonStartClient.check_event(event)
self.buttonAutoPilot.check_event(event)
def start_joystick(self):
joystick_count = pygame.joystick.get_count()
if joystick_count < 1:
return 0
if self.startClient:
for i in range(joystick_count):
joystick.append(pygame.joystick.Joystick(i))
joystick[i].init()
def joystick_loop(self):
if len(joystick) != 0:
for x in range(len(joystick)):
SystemController.get_axes_buttons_control(joystick[x], self.screen)
pygame.display.flip()
else:
TextOutput.reset()
TextOutput.plint(self.screen, "Joystick not found".format())
def main_loop(self):
while not self.done:
self.event_loop()
self.screen.fill(self.color)
self.buttonStartCarlaServer.update(self.screen)
self.buttonStartJoystick.update(self.screen)
self.buttonStartClient.update(self.screen)
self.buttonAutoPilot.update(self.screen)
self.joystick_loop()
pygame.display.update()
self.clock.tick(self.fps)
class ServerController(object):
def __init__(self):
pass
def start_server_carla(self):
exec(open(os.getcwd()+"/modules/client/StartCarlaServer.py").read())
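# Example entry point (a sketch; the repo's own launcher may differ):
#   if __name__ == '__main__':
#       Control().main_loop()
#       pygame.quit()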
``` |
{
"source": "jobin-sun/Local-Codeskulptor",
"score": 3
} |
#### File: Local-Codeskulptor/bin/gen_module.py
```python
import glob
import os
path="..\src"
def fun(path):
if path != "":
path = path + os.sep
for file in glob.glob(path+'*'):
print file
if os.path.isdir(file):
fun(file)
elif file.endswith('.py'):
fd = open(file,"r")
cnt = fd.read()
fd.close()
cnt=cnt.replace("\r","")
cnt=cnt.replace("\n","\\n")
cnt=cnt.replace("\"","\\\"")
modName=file.replace("\\","/")
fdLib.write("myModule[\"src/lib/"+modName+"\"]=\""+cnt+"\";\n")
os.chdir(path)
fd = open("codeskulptor.py","r")
cnt = fd.read()
fd.close()
cnt=cnt.replace("\r","")
cnt=cnt.replace("\n","\\n")
cnt=cnt.replace("\"","\\\"")
fdMain = open("../Temp/main.js","w")
fdMain.write("prog=\""+cnt+"\"")
fdMain.close()
fdLib = open("../Temp/skulpt-mylib.js","w")
fdLib.write("myModule={};\n")
fun("")
fdLib.close()
``` |
{
"source": "jobiols/odoo-env",
"score": 2
} |
#### File: odoo-env/odoo_env/test_oe.py
```python
import unittest
from odoo_env.command import Command, MakedirCommand, CreateNginxTemplate
from odoo_env.client import Client
from odoo_env.odooenv import OdooEnv
from odoo_env.config import OeConfig
from odoo_env.repos import Repo, Repo2
from odoo_env.images import Image, Image2
class TestRepository(unittest.TestCase):
def test_install(self):
""" ################################################# TEST INSTALLATION
"""
options = {
'debug': False,
'no-repos': False,
'nginx': True,
}
base_dir = '/odoo_ar/'
oe = OdooEnv(options)
cmds = oe.install('test_client')
self.assertEqual(
cmds[0].args, base_dir)
self.assertEqual(
cmds[0].command, 'sudo mkdir ' + base_dir)
self.assertEqual(
cmds[0].usr_msg, 'Installing client test_client')
self.assertEqual(
cmds[2].args, '{}odoo-9.0/test_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].command,
'mkdir -p {}odoo-9.0/test_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].usr_msg, False)
self.assertEqual(
cmds[3].args, '/odoo_ar/odoo-9.0/test_client/config')
self.assertEqual(
cmds[3].command, 'mkdir -p /odoo_ar/odoo-9.0/test_client/config')
self.assertEqual(
cmds[3].usr_msg, False)
self.assertEqual(
cmds[4].args, '/odoo_ar/odoo-9.0/test_client/data_dir')
self.assertEqual(
cmds[4].command, 'mkdir -p /odoo_ar/odoo-9.0/test_client/data_dir')
self.assertEqual(
cmds[4].usr_msg, False)
self.assertEqual(
cmds[5].args, '/odoo_ar/odoo-9.0/test_client/backup_dir')
self.assertEqual(
cmds[5].command,
'mkdir -p /odoo_ar/odoo-9.0/test_client/backup_dir')
self.assertEqual(
cmds[5].usr_msg, False)
self.assertEqual(
cmds[6].args, '/odoo_ar/odoo-9.0/test_client/log')
self.assertEqual(
cmds[6].command, 'mkdir -p /odoo_ar/odoo-9.0/test_client/log')
self.assertEqual(
cmds[6].usr_msg, False)
self.assertEqual(
cmds[7].args, '/odoo_ar/odoo-9.0/test_client/sources')
self.assertEqual(
cmds[7].command, 'mkdir -p /odoo_ar/odoo-9.0/test_client/sources')
self.assertEqual(
cmds[7].usr_msg, False)
self.assertEqual(
cmds[8].args, False)
self.assertEqual(
cmds[8].command, 'chmod o+w /odoo_ar/odoo-9.0/test_client/config'
)
self.assertEqual(
cmds[8].usr_msg, False)
self.assertEqual(
cmds[9].args, False)
self.assertEqual(
cmds[9].command, 'chmod o+w /odoo_ar/odoo-9.0/test_client/data_dir'
)
self.assertEqual(
cmds[9].usr_msg, False)
self.assertEqual(
cmds[10].args, False)
self.assertEqual(
cmds[10].command, 'chmod o+w /odoo_ar/odoo-9.0/test_client/log')
self.assertEqual(
cmds[10].usr_msg, False)
self.assertEqual(
cmds[11].args, False)
self.assertEqual(
cmds[11].command,
'chmod o+w /odoo_ar/odoo-9.0/test_client/backup_dir')
self.assertEqual(
cmds[11].usr_msg, False)
self.assertEqual(
cmds[12].args, '/odoo_ar/nginx/cert')
self.assertEqual(
cmds[12].command, 'mkdir -p /odoo_ar/nginx/cert')
self.assertEqual(
cmds[12].usr_msg, False)
self.assertEqual(
cmds[13].args, '/odoo_ar/nginx/conf')
self.assertEqual(
cmds[13].command, 'mkdir -p /odoo_ar/nginx/conf')
self.assertEqual(
cmds[13].usr_msg, False)
self.assertEqual(
cmds[14].args, '/odoo_ar/nginx/log')
self.assertEqual(
cmds[14].command, 'mkdir -p /odoo_ar/nginx/log')
self.assertEqual(
cmds[14].usr_msg, False)
self.assertEqual(
cmds[15].args, '/odoo_ar/nginx/conf/nginx.conf')
self.assertEqual(
cmds[15].command, '/odoo_ar/nginx/conf/nginx.conf')
self.assertEqual(
cmds[15].usr_msg, 'Generating nginx.conf template')
self.assertEqual(
cmds[16].args,
'/odoo_ar/odoo-9.0/test_client/sources/cl-test-client')
self.assertEqual(
cmds[16].command,
'git -C /odoo_ar/odoo-9.0/test_client/sources/ clone --depth 1 '
'-b 9.0 https://github.com/jobiols/cl-test-client')
self.assertEqual(
cmds[16].usr_msg,
'cloning b 9.0 jobiols/cl-test-client ')
self.assertEqual(
cmds[17].args,
'/odoo_ar/odoo-9.0/test_client/sources/cl-test-client')
self.assertEqual(
cmds[17].command,
'git -C /odoo_ar/odoo-9.0/test_client/sources/cl-test-client pull')
self.assertEqual(
cmds[17].usr_msg,
'pulling b 9.0 jobiols/cl-test-client ')
self.assertEqual(
cmds[18].args,
'/odoo_ar/odoo-9.0/test_client/sources/odoo-addons')
self.assertEqual(
cmds[18].command,
'git -C /odoo_ar/odoo-9.0/test_client/sources/ clone --depth 1 '
'-b 9.0 https://github.com/jobiols/odoo-addons')
self.assertEqual(
cmds[18].usr_msg,
'cloning b 9.0 jobiols/odoo-addons ')
self.assertEqual(
cmds[19].args,
'/odoo_ar/odoo-9.0/test_client/sources/odoo-addons')
self.assertEqual(
cmds[19].command,
'git -C /odoo_ar/odoo-9.0/test_client/sources/odoo-addons pull')
self.assertEqual(
cmds[19].usr_msg,
'pulling b 9.0 jobiols/odoo-addons ')
def test_install2(self):
""" ################################################# TEST INSTALLATION
"""
options = {
'debug': False,
'no-repos': False,
'nginx': True,
}
base_dir = '/odoo_ar/'
oe = OdooEnv(options)
cmds = oe.install('test2_client')
self.assertEqual(
cmds[0].args, base_dir)
self.assertEqual(
cmds[0].command, 'sudo mkdir ' + base_dir)
self.assertEqual(
cmds[0].usr_msg, 'Installing client test2_client')
self.assertEqual(
cmds[2].args, '{}odoo-9.0/test2_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].command,
'mkdir -p {}odoo-9.0/test2_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].usr_msg, False)
self.assertEqual(
cmds[3].args, '/odoo_ar/odoo-9.0/test2_client/config')
self.assertEqual(
cmds[3].command, 'mkdir -p /odoo_ar/odoo-9.0/test2_client/config')
self.assertEqual(
cmds[3].usr_msg, False)
self.assertEqual(
cmds[4].args, '/odoo_ar/odoo-9.0/test2_client/data_dir')
self.assertEqual(
cmds[4].command, 'mkdir -p /odoo_ar/odoo-9.0/test2_client/data_dir')
self.assertEqual(
cmds[4].usr_msg, False)
self.assertEqual(
cmds[5].args, '/odoo_ar/odoo-9.0/test2_client/backup_dir')
self.assertEqual(
cmds[5].command,
'mkdir -p /odoo_ar/odoo-9.0/test2_client/backup_dir')
self.assertEqual(
cmds[5].usr_msg, False)
self.assertEqual(
cmds[6].args, '/odoo_ar/odoo-9.0/test2_client/log')
self.assertEqual(
cmds[6].command, 'mkdir -p /odoo_ar/odoo-9.0/test2_client/log')
self.assertEqual(
cmds[6].usr_msg, False)
self.assertEqual(
cmds[7].args, '/odoo_ar/odoo-9.0/test2_client/sources')
self.assertEqual(
cmds[7].command, 'mkdir -p /odoo_ar/odoo-9.0/test2_client/sources')
self.assertEqual(
cmds[7].usr_msg, False)
self.assertEqual(
cmds[8].args, False)
self.assertEqual(
cmds[8].command, 'chmod o+w /odoo_ar/odoo-9.0/test2_client/config'
)
self.assertEqual(
cmds[8].usr_msg, False)
self.assertEqual(
cmds[9].args, False)
self.assertEqual(
cmds[9].command, 'chmod o+w /odoo_ar/odoo-9.0/test2_client/data_dir'
)
self.assertEqual(
cmds[9].usr_msg, False)
self.assertEqual(
cmds[10].args, False)
self.assertEqual(
cmds[10].command, 'chmod o+w /odoo_ar/odoo-9.0/test2_client/log')
self.assertEqual(
cmds[10].usr_msg, False)
self.assertEqual(
cmds[11].args, False)
self.assertEqual(
cmds[11].command,
'chmod o+w /odoo_ar/odoo-9.0/test2_client/backup_dir')
self.assertEqual(
cmds[11].usr_msg, False)
self.assertEqual(
cmds[12].args, '/odoo_ar/nginx/cert')
self.assertEqual(
cmds[12].command, 'mkdir -p /odoo_ar/nginx/cert')
self.assertEqual(
cmds[12].usr_msg, False)
self.assertEqual(
cmds[13].args, '/odoo_ar/nginx/conf')
self.assertEqual(
cmds[13].command, 'mkdir -p /odoo_ar/nginx/conf')
self.assertEqual(
cmds[13].usr_msg, False)
self.assertEqual(
cmds[14].args, '/odoo_ar/nginx/log')
self.assertEqual(
cmds[14].command, 'mkdir -p /odoo_ar/nginx/log')
self.assertEqual(
cmds[14].usr_msg, False)
self.assertEqual(
cmds[15].args, '/odoo_ar/nginx/conf/nginx.conf')
self.assertEqual(
cmds[15].command, '/odoo_ar/nginx/conf/nginx.conf')
self.assertEqual(
cmds[15].usr_msg, 'Generating nginx.conf template')
self.assertEqual(
cmds[16].args,
'/odoo_ar/odoo-9.0/test2_client/sources/odoo-addons')
self.assertEqual(
cmds[16].command,
'git -C /odoo_ar/odoo-9.0/test2_client/sources/ clone --depth 1 '
'-b 9.0 https://github.com/jobiols/odoo-addons.git')
self.assertEqual(
cmds[16].usr_msg,
'cloning b 9.0 https://github.com/jobiols/odoo-addons.git')
self.assertEqual(
cmds[17].args,
'/odoo_ar/odoo-9.0/test2_client/sources/odoo-addons')
self.assertEqual(
cmds[17].command,
'git -C /odoo_ar/odoo-9.0/test2_client/sources/odoo-addons pull')
self.assertEqual(
cmds[17].usr_msg,
'pulling b 9.0 https://github.com/jobiols/odoo-addons.git')
self.assertEqual(
cmds[18].args,
'/odoo_ar/odoo-9.0/test2_client/sources/adhoc-odoo-argentina')
self.assertEqual(
cmds[18].command,
'git -C /odoo_ar/odoo-9.0/test2_client/sources/ clone --depth 1 '
'-b 9.0 https://github.com/ingadhoc/odoo-argentina.git adhoc-odoo-argentina')
self.assertEqual(
cmds[18].usr_msg,
'cloning b 9.0 https://github.com/ingadhoc/odoo-argentina.git >> adhoc-odoo-argentina')
self.assertEqual(
cmds[19].args,
'/odoo_ar/odoo-9.0/test2_client/sources/adhoc-odoo-argentina')
self.assertEqual(
cmds[19].command,
'git -C /odoo_ar/odoo-9.0/test2_client/sources/adhoc-odoo-argentina pull')
self.assertEqual(
cmds[19].usr_msg,
'pulling b 9.0 https://github.com/ingadhoc/odoo-argentina.git >> adhoc-odoo-argentina')
def test_install2_enterprise(self):
""" ################################### TEST INSTALLATION v2 ENTERPRISE
"""
options = {
'debug': True,
'no-repos': False,
'nginx': True,
'extract_sources': False,
}
base_dir = '/odoo_ar/'
oe = OdooEnv(options)
cmds = oe.install('test2e_client')
self.assertEqual(
cmds[0].args, base_dir)
self.assertEqual(
cmds[0].command, 'sudo mkdir ' + base_dir)
self.assertEqual(
cmds[0].usr_msg, 'Installing client test2e_client')
self.assertEqual(
cmds[2].args, '{}odoo-9.0e/test2e_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].command,
'mkdir -p {}odoo-9.0e/test2e_client/postgresql'.format(base_dir))
self.assertEqual(
cmds[2].usr_msg, False)
self.assertEqual(
cmds[8].args, '/odoo_ar/odoo-9.0e/dist-packages')
self.assertEqual(
cmds[8].command, 'mkdir -p /odoo_ar/odoo-9.0e/dist-packages'
)
self.assertEqual(
cmds[8].usr_msg, False)
def test_cmd(self):
""" ########################################################## TEST CMD
"""
options = {
'debug': False,
'no-repos': False,
'nginx': False,
}
oe = OdooEnv(options)
        # if there are no arguments to check, no check is required
c = Command(oe, command='cmd', usr_msg='hola')
self.assertEqual(c.command, 'cmd')
self.assertEqual(c.usr_msg, 'hola')
self.assertEqual(c.args, False)
self.assertEqual(c.check(), True)
c = MakedirCommand(oe, command='cmd', args='no_existe_este_directorio')
self.assertEqual(c.check_args(), True)
c = CreateNginxTemplate(oe, command='cmd',
args='no_exist',
usr_msg='Testing msg')
self.assertEqual(c.usr_msg, 'Testing msg')
def test_qa(self):
""" ########################################################### TEST QA
"""
options = {
'debug': False
}
client_name = 'test_client'
database = 'cliente_test'
modules = 'modulo_a_testear'
oe = OdooEnv(options)
client = Client(oe, client_name)
cmds = oe.qa(client_name, database, modules, client_test=client)
cmd = cmds[0]
self.assertEqual(cmd.usr_msg, 'Performing tests on module '
'modulo_a_testear for client '
'test_client and database cliente_test')
command = \
"sudo docker run --rm -it " \
"-v /odoo_ar/odoo-9.0/test_client/config:/opt/odoo/etc/ " \
"-v /odoo_ar/odoo-9.0/test_client/data_dir:/opt/odoo/data " \
"-v /odoo_ar/odoo-9.0/test_client/log:/var/log/odoo " \
"-v /odoo_ar/odoo-9.0/test_client/sources:" \
"/opt/odoo/custom-addons " \
"-v /odoo_ar/odoo-9.0/test_client/backup_dir:/var/odoo/backups/ " \
"--link wdb " \
"-e WDB_SOCKET_SERVER=wdb " \
"-e ODOO_CONF=/dev/null " \
"--link pg-test_client:db jobiols/odoo-jeo:9.0.debug -- " \
"-d cliente_test " \
"--stop-after-init " \
"--log-level=test " \
"--test-enable " \
"-u modulo_a_testear "
self.assertEqual(cmd.command, command)
def test_run_cli(self):
""" ###################################################### TEST RUN CLI
"""
options = {
'debug': False,
'nginx': False,
}
client_name = 'test_client'
oe = OdooEnv(options)
cmds = oe.run_client(client_name)
cmd = cmds[0]
self.assertEqual(cmd.usr_msg, 'Starting Odoo image for client '
'test_client on port 8069')
command = \
"sudo docker run -d " \
"--link aeroo:aeroo " \
"-p 8069:8069 " \
"-p 8072:8072 " \
"-v /odoo_ar/odoo-9.0/test_client/config:/opt/odoo/etc/ " \
"-v /odoo_ar/odoo-9.0/test_client/data_dir:/opt/odoo/data " \
"-v /odoo_ar/odoo-9.0/test_client/log:/var/log/odoo " \
"-v /odoo_ar/odoo-9.0/test_client/sources:" \
"/opt/odoo/custom-addons " \
"-v /odoo_ar/odoo-9.0/test_client/backup_dir:/var/odoo/backups/ " \
"--link pg-test_client:db " \
"--restart=always " \
"--name test_client " \
"-e ODOO_CONF=/dev/null " \
"jobiols/odoo-jeo:9.0 " \
"--logfile=/var/log/odoo/odoo.log "
self.assertEqual(cmd.command, command)
def test_run_cli_debug(self):
""" ############################################## TEST RUN CLI W/DEBUG
"""
options = {
'debug': True,
'nginx': False,
}
client_name = 'test_client'
oe = OdooEnv(options)
cmds = oe.run_client(client_name)
cmd = cmds[0]
self.assertEqual(cmd.usr_msg, 'Starting Odoo image for client '
'test_client on port 8069')
command = \
'sudo docker run --rm -it ' \
'--link aeroo:aeroo ' \
'--link wdb ' \
'-p 8069:8069 -p 8072:8072 ' \
'-v /odoo_ar/odoo-9.0/test_client/config:/opt/odoo/etc/ ' \
'-v /odoo_ar/odoo-9.0/test_client/data_dir:/opt/odoo/data ' \
'-v /odoo_ar/odoo-9.0/test_client/log:/var/log/odoo ' \
'-v /odoo_ar/odoo-9.0/test_client/sources:' \
'/opt/odoo/custom-addons ' \
'-v /odoo_ar/odoo-9.0/test_client/backup_dir:/var/odoo/backups/ ' \
'-v /odoo_ar/odoo-9.0/extra-addons:/opt/odoo/extra-addons ' \
'-v /odoo_ar/odoo-9.0/dist-packages:' \
'/usr/lib/python2.7/dist-packages ' \
'-v /odoo_ar/odoo-9.0/dist-local-packages:' \
'/usr/local/lib/python2.7/dist-packages ' \
'--link pg-test_client:db ' \
'--name test_client ' \
'-e ODOO_CONF=/dev/null ' \
'-e WDB_SOCKET_SERVER=wdb jobiols/odoo-jeo:9.0.debug ' \
'--logfile=/dev/stdout '
self.assertEqual(cmd.command, command)
def test_pull_images(self):
""" ################################################## TEST PULL IMAGES
"""
options = {
'debug': False,
'nginx': False,
}
client_name = 'test_client'
oe = OdooEnv(options)
cmds = oe.pull_images(client_name)
cmd = cmds[0]
self.assertEqual(cmd.usr_msg, 'Pulling Image aeroo')
command = 'sudo docker pull jobiols/aeroo-docs'
self.assertEqual(cmd.command, command)
cmd = cmds[1]
self.assertEqual(cmd.usr_msg, 'Pulling Image odoo')
command = 'sudo docker pull jobiols/odoo-jeo:9.0'
self.assertEqual(cmd.command, command)
cmd = cmds[2]
self.assertEqual(cmd.usr_msg, 'Pulling Image postgres')
command = 'sudo docker pull postgres:9.5'
self.assertEqual(cmd.command, command)
cmd = cmds[3]
self.assertEqual(cmd.usr_msg, 'Pulling Image nginx')
command = 'sudo docker pull nginx:latest'
self.assertEqual(cmd.command, command)
def test_update(self):
""" ################################################## TEST PULL UPDATE
"""
options = {
'debug': False,
'nginx': False,
}
client_name = 'test_client'
oe = OdooEnv(options)
cmds = oe.update(client_name, 'client_prod', ['all'])
command = \
"sudo docker run --rm -it " \
"-v /odoo_ar/odoo-9.0/test_client/config:/opt/odoo/etc/ " \
"-v /odoo_ar/odoo-9.0/test_client/data_dir:/opt/odoo/data " \
"-v /odoo_ar/odoo-9.0/test_client/log:/var/log/odoo " \
"-v /odoo_ar/odoo-9.0/test_client/sources:" \
"/opt/odoo/custom-addons " \
"-v /odoo_ar/odoo-9.0/test_client/backup_dir:/var/odoo/backups/ " \
"--link pg-test_client:db " \
"-e ODOO_CONF=/dev/null jobiols/odoo-jeo:9.0 " \
"-- " \
"--stop-after-init " \
"--logfile=false " \
"-d client_prod " \
"-u all "
self.assertEqual(cmds[0].command, command)
def test_restore(self):
""" ################################################# TEST PULL RESTORE
"""
options = {
'debug': False,
'nginx': False,
}
client_name = 'test_client'
database = 'client_prod'
backup_file = 'bkp.zip'
oe = OdooEnv(options)
cmds = oe.restore(client_name, database, backup_file, deactivate=True)
command = \
'sudo docker run --rm -i ' \
'--link pg-test_client:db ' \
'-v /odoo_ar/odoo-9.0/test_client/backup_dir/:/backup ' \
'-v /odoo_ar/odoo-9.0/test_client/data_dir/filestore:/filestore ' \
'--env NEW_DBNAME=client_prod ' \
'--env ZIPFILE=bkp.zip ' \
'--env DEACTIVATE=True ' \
'jobiols/dbtools:1.1.0 '
self.assertEqual(cmds[0].command, command)
def test_download_image_sources(self):
""" ####################################### TEST DOWNLOAD IMAGE SOURCES
"""
options = {
'debug': True,
'no-repos': False,
'nginx': False,
'extract_sources': True,
}
oe = OdooEnv(options)
cmds = oe.install('test_client')
command = 'sudo mkdir /odoo_ar/'
self.assertEqual(cmds[0].command, command)
#command = 'sudo chown jobiols:jobiols /odoo_ar/'
#self.assertEqual(cmds[1].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/postgresql'
self.assertEqual(cmds[2].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/config'
self.assertEqual(cmds[3].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/data_dir'
self.assertEqual(cmds[4].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/backup_dir'
self.assertEqual(cmds[5].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/log'
self.assertEqual(cmds[6].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/test_client/sources'
self.assertEqual(cmds[7].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/dist-packages'
self.assertEqual(cmds[8].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/dist-local-packages'
self.assertEqual(cmds[9].command, command)
command = 'mkdir -p /odoo_ar/odoo-9.0/extra-addons'
self.assertEqual(cmds[10].command, command)
command = 'chmod og+w /odoo_ar/odoo-9.0/dist-packages'
self.assertEqual(cmds[11].command, command)
command = 'chmod og+w /odoo_ar/odoo-9.0/dist-local-packages'
self.assertEqual(cmds[12].command, command)
command = 'chmod og+w /odoo_ar/odoo-9.0/extra-addons'
self.assertEqual(cmds[13].command, command)
command = 'chmod o+w /odoo_ar/odoo-9.0/test_client/config'
self.assertEqual(cmds[14].command, command)
command = 'chmod o+w /odoo_ar/odoo-9.0/test_client/data_dir'
self.assertEqual(cmds[15].command, command)
command = 'chmod o+w /odoo_ar/odoo-9.0/test_client/log'
self.assertEqual(cmds[16].command, command)
command = 'chmod o+w /odoo_ar/odoo-9.0/test_client/backup_dir'
self.assertEqual(cmds[17].command, command)
command = 'sudo docker run -it --rm ' \
'--entrypoint=/extract_dist-packages.sh ' \
'-v /odoo_ar/odoo-9.0/dist-packages/:/mnt/dist-packages ' \
'jobiols/odoo-jeo:9.0.debug '
self.assertEqual(cmds[18].command, command)
command = 'sudo docker run -it --rm ' \
'--entrypoint=/extract_dist-local-packages.sh ' \
'-v /odoo_ar/odoo-9.0/dist-local-packages/:' \
'/mnt/dist-local-packages ' \
'jobiols/odoo-jeo:9.0.debug '
self.assertEqual(cmds[19].command, command)
command = 'sudo docker run -it --rm ' \
'--entrypoint=/extract_extra-addons.sh ' \
'-v /odoo_ar/odoo-9.0/extra-addons/:/mnt/extra-addons ' \
'jobiols/odoo-jeo:9.0.debug '
self.assertEqual(cmds[20].command, command)
command = 'sudo chmod -R og+w /odoo_ar/odoo-9.0/dist-packages/'
self.assertEqual(cmds[21].command, command)
command = 'sudo chmod -R og+w /odoo_ar/odoo-9.0/dist-local-packages/'
self.assertEqual(cmds[22].command, command)
command = 'sudo chmod -R og+w /odoo_ar/odoo-9.0/extra-addons/'
self.assertEqual(cmds[23].command, command)
command = '/odoo_ar/odoo-9.0/dist-packages/.gitignore'
self.assertEqual(cmds[24].command, command)
command = '/odoo_ar/odoo-9.0/dist-local-packages/.gitignore'
self.assertEqual(cmds[25].command, command)
command = '/odoo_ar/odoo-9.0/extra-addons/.gitignore'
self.assertEqual(cmds[26].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-packages/ init '
self.assertEqual(cmds[27].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-local-packages/ init '
self.assertEqual(cmds[28].command, command)
command = 'git -C /odoo_ar/odoo-9.0/extra-addons/ init '
self.assertEqual(cmds[29].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-packages/ add . '
self.assertEqual(cmds[30].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-local-packages/ add . '
self.assertEqual(cmds[31].command, command)
command = 'git -C /odoo_ar/odoo-9.0/extra-addons/ add . '
self.assertEqual(cmds[32].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-packages/ commit -m inicial '
self.assertEqual(cmds[33].command, command)
command = 'git -C /odoo_ar/odoo-9.0/dist-local-packages/ ' \
'commit -m inicial '
self.assertEqual(cmds[34].command, command)
command = 'git -C /odoo_ar/odoo-9.0/extra-addons/ commit -m inicial '
self.assertEqual(cmds[35].command, command)
command = 'git -C /odoo_ar/odoo-9.0/test_client/sources/ ' \
'clone --depth 1 -b 9.0 ' \
'https://github.com/jobiols/cl-test-client'
self.assertEqual(cmds[36].command, command)
command = 'git -C ' \
'/odoo_ar/odoo-9.0/test_client/sources/cl-test-client ' \
'pull'
self.assertEqual(cmds[37].command, command)
command = 'git -C /odoo_ar/odoo-9.0/test_client/sources/ ' \
'clone --depth 1 ' \
'-b 9.0 https://github.com/jobiols/odoo-addons'
self.assertEqual(cmds[38].command, command)
command = 'git -C ' \
'/odoo_ar/odoo-9.0/test_client/sources/odoo-addons ' \
'pull'
self.assertEqual(cmds[39].command, command)
def test_check_version(self):
""" ##################################################### CHECK VERSION
"""
self.assertTrue(OeConfig().check_version())
def test_environment(self):
""" ##################################################### CHECK VERSION
"""
env = OeConfig().get_environment()
OeConfig().save_environment('prod')
env = OeConfig().get_environment()
self.assertEqual(env, 'prod')
OeConfig().save_environment('debug')
env = OeConfig().get_environment()
self.assertEqual(env, 'debug')
def test_save_multiple_clients(self):
OeConfig().save_client_path('test_clientx', 'multiple_path1')
OeConfig().save_client_path('test_clientx', 'multiple_path2')
self.assertEqual(OeConfig().get_client_path('test_clientx'), 'multiple_path1')
def test_repo_clone(self):
repo = Repo({'usr': 'jobiols', 'repo': 'project', 'branch': '9.0'})
self.assertEqual(repo.clone, 'clone --depth 1 -b 9.0 https://github.com/jobiols/project')
def test_repo2_clone(self):
repo = Repo2('https://github.com/jobiols/project.git', '9.0')
self.assertEqual(repo.dir_name, 'project')
self.assertEqual(repo.branch, '9.0')
self.assertEqual(repo.url, 'https://github.com/jobiols/project.git')
self.assertEqual(repo.formatted, 'b 9.0 https://github.com/jobiols/project.git')
self.assertEqual(repo.clone, 'clone --depth 1 -b 9.0 https://github.com/jobiols/project.git')
self.assertEqual(repo.pull, 'pull')
def test_repo2_clone_branch(self):
repo = Repo2('https://github.com/jobiols/project.git -b 9.0', '8.0')
self.assertEqual(repo.dir_name, 'project')
self.assertEqual(repo.branch, '9.0')
self.assertEqual(repo.url, 'https://github.com/jobiols/project.git')
self.assertEqual(repo.formatted, 'b 9.0 https://github.com/jobiols/project.git')
self.assertEqual(repo.clone, 'clone --depth 1 -b 9.0 https://github.com/jobiols/project.git')
self.assertEqual(repo.pull, 'pull')
def test_repo2_clone_dir(self):
repo = Repo2('https://github.com/jobiols/project.git adhoc-project', '9.0')
self.assertEqual(repo.dir_name, 'adhoc-project')
self.assertEqual(repo.branch, '9.0')
self.assertEqual(repo.url, 'https://github.com/jobiols/project.git adhoc-project')
self.assertEqual(repo.formatted, 'b 9.0 https://github.com/jobiols/project.git >> adhoc-project')
self.assertEqual(repo.clone, 'clone --depth 1 -b 9.0 https://github.com/jobiols/project.git adhoc-project')
self.assertEqual(repo.pull, 'pull')
def test_repo2_clone_branch_dir(self):
repo = Repo2('https://github.com/jobiols/project.git adhoc-project -b 9.0', '8.0')
self.assertEqual(repo.dir_name, 'adhoc-project')
self.assertEqual(repo.branch, '9.0')
self.assertEqual(repo.url, 'https://github.com/jobiols/project.git adhoc-project')
self.assertEqual(repo.formatted, 'b 9.0 https://github.com/jobiols/project.git >> adhoc-project')
self.assertEqual(repo.clone, 'clone --depth 1 -b 9.0 https://github.com/jobiols/project.git adhoc-project')
self.assertEqual(repo.pull, 'pull')
def test_image(self):
image = Image({'name': 'odoo', 'usr': 'jobiols', 'img': 'odoo-jeo', 'ver': '9.0'})
self.assertEqual(image.name, 'jobiols/odoo-jeo:9.0')
self.assertEqual(image.version, '9.0')
self.assertEqual(image.short_name, 'odoo')
def test_image2(self):
image = Image2('odoo jobiols/odoo-jeo:9.0')
self.assertEqual(image.name, 'jobiols/odoo-jeo:9.0')
self.assertEqual(image.version, '9.0')
self.assertEqual(image.short_name, 'odoo')
``` |
{
"source": "jobirk/pulse_simulation",
"score": 3
} |
#### File: jobirk/pulse_simulation/cavity.py
```python
import numpy as np
import matplotlib.pyplot as plt
# from scipy import signal
from matplotlib import animation
# import scipy.constants as con
from IPython.display import HTML
from tqdm import tqdm
# import matplotlib.cm as cm
c = 1
def resonator_modes(t, z, n_modes=3, random_phases=False, plot=True,
figuresize=(10, 4), spectrum_std=1000, save_in=""):
# length of the resonator
L = z.max() - z.min()
# calculate the frequency difference between two neighbouring modes of
# the resonator
delta_nu = c / (2 * L)
frequencies = np.array([delta_nu * i for i in range(1, n_modes+1)])
phases = np.zeros(n_modes)
if random_phases is True:
phases = np.random.uniform(0, 200, n_modes)
# spectrum = signal.gaussian(n_modes, std=spectrum_std)
spectrum = np.ones(n_modes)
if plot is True:
fig, axs = plt.subplots(2, 1, figsize=figuresize, dpi=100, frameon=False)
axs[0].axis('off')
axs[1].axis('off')
axs.flatten()
axs[0].set_xlim(z.min(), z.max())
axs[1].set_xlim(z.min(), z.max())
# axs[2].plot(frequencies, spectrum)
# calculate the sum...
E_i = np.zeros([n_modes, len(z)])
for i in range(n_modes):
omega = 2 * np.pi * frequencies[i]
k = omega / c
E_i[i, :] = spectrum[i] * np.sin(2 * omega * t - phases[i]) * np.sin(k * z)
if plot is True:
fig_2, ax2 = plt.subplots(figsize=(10, 2), dpi=100, frameon=False)
ax2.set_ylim(-1.1, 1.1)
ax2.axis('off')
ax2.plot(z, E_i[i])
axs[0].plot(z, E_i[i], label=str(i))
if save_in != "":
fig_2.savefig(save_in+"_mode_"+str(i)+".pdf")
plt.close()
else:
pass
if plot is True:
E_total = np.sum(E_i, axis=0)
maximum = np.max(np.abs(E_total))
axs[1].set_ylim(- 1.2 * maximum, 1.2 * maximum)
# axs[0].legend()
axs[1].plot(z, E_total)
fig_3, ax3 = plt.subplots(figsize=(10, 2), dpi=100, frameon=False)
ax3.axis('off')
ax3.plot(z, E_total)
if save_in != "":
fig.savefig(save_in+"_both.pdf")
fig_3.savefig(save_in+"_sum.pdf")
plt.close()
else:
pass
return E_i
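# Usage sketch (illustrative values only): superpose the first five longitudinal
# modes of a cavity spanning z in [0, 1] at t = 0 without plotting; the mode
# spacing follows delta_nu = c / (2 L) as computed above.
#
# z = np.linspace(0, 1, 500)
# E_i = resonator_modes(0.0, z, n_modes=5, plot=False)
# E_total = E_i.sum(axis=0)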
def animate_resonator(z, times, n_modes, ms_between_frames=60, figuresize=(11, 4), saveas=""):
"""Animates the time evolution of the wave packet
Parameters
----------
z : array_like
Array of the z-axis your wave packet is propagating on.
times : array_like
Times you want to include in the animation.
n_modes: int
Number of modes included in the calculation.
ms_between_frames : int, optional
Milliseconds of pause between two frames in the animation. Default
        is 60.
figuresize : tuple of ints, optional
Size of the figure when plotting the wave. Default is (11, 4).
saveas : string, optional
Path where you want to save the animation as .gif-file.
"""
modes = [resonator_modes(t, z, n_modes, plot=False) for t in tqdm(times)]
pulses = [E_i.sum(axis=0) for E_i in tqdm(modes)]
fig, ax = plt.subplots(figsize=figuresize)
ax.set_xlim(z.min(), z.max())
maximum = np.max(np.abs(np.array(pulses)))
ax.set_ylim(-1.2 * maximum, 1.2 * maximum)
ax.set_xlabel(r"position $z$")
lines = [ax.plot([], [], color="forestgreen")[0]
for i in pulses]
def init():
for line in lines:
line.set_data([], [])
return lines
def animate(i):
for j in range(len(lines)):
lines[j].set_data(z, pulses[i])
return lines
plt.close()
anim = animation.FuncAnimation(fig, animate, init_func=init, blit=True,
frames=len(pulses),
interval=ms_between_frames)
if saveas != "":
anim.save(saveas, writer='imagemagick', fps=int(1000/ms_between_frames))
return HTML(anim.to_html5_video())
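# Usage sketch for a notebook (illustrative values; rendering the HTML5 video
# additionally requires ffmpeg, and saving a .gif requires imagemagick -- both
# are assumptions, not enforced by this module):
#
# z = np.linspace(0, 1, 500)
# times = np.linspace(0, 2, 100)
# animate_resonator(z, times, n_modes=5, ms_between_frames=60)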
``` |
{
"source": "Jobkanis/Battleport",
"score": 3
} |
#### File: Jobkanis/Battleport/class_Game.py
```python
import random
import math
import time
import copy
import pygame
import class_Player
import class_Boats
import class_Positions
import class_Visual
import class_Menu
import database
class Game:
def __init__(self, gameDisplay, clock, width, height):
#creating classes
self.Sound_enabled = True
self.Music_enabled = True
self.Players = []
self.Positions = []
self.EmptyBoat = NotImplemented
self.EmptyPosition = NotImplemented
self.EmptyPlayer = NotImplemented
self.Player1 = NotImplemented
self.Player2 = NotImplemented
self.Visual = class_Visual.Visual(self, gameDisplay, clock, width, height)
self.Database = database.Database()
def setupgame(self, player1name, player2name):
if self.Music_enabled == True:
pygame.mixer.music.load("sound/bgm_ingame.wav")
pygame.mixer.music.set_volume(1)
pygame.mixer.music.play(-1)
######### Empty Variables ###########
########## Empty Classes ##########
self.EmptyPlayer = class_Player.Player(self, "empty")
self.Players.append(self.EmptyPlayer)
self.EmptyPosition = class_Positions.Position(self, -1, -1)
self.Positions.append(self.EmptyPosition)
self.EmptyBoat = class_Boats.Boat(self, self.EmptyPlayer, "empty")
self.EmptyPlayer.Boats.append(self.EmptyBoat)
################ Players ###################
self.CreatePositions() #Create all positions
self.att_sound = pygame.mixer.Sound('ship_att.wav')
self.sink_sound = pygame.mixer.Sound('ship_dead.wav')
self.goal_sound = pygame.mixer.Sound('ship_dead.wav')
self.move_sound = pygame.mixer.Sound('ship_move.wav')
self.ship_select_sound = pygame.mixer.Sound('ship_select.wav')
self.game_won = pygame.mixer.Sound('game_won.wav')
self.game_over = pygame.mixer.Sound('game_over.wav')
self.Player1 = class_Player.Player(self, player1name)
self.Players.append(self.Player1)
self.Player2 = class_Player.Player(self, player2name)
self.Players.append(self.Player2)
self.Winner = self.EmptyPlayer
self.Player_Playing = self.Player1
self.Visual.show_nextturn(self.Player_Playing)
self.Player1.CreateBoats()
self.Player_Playing = self.Player2
self.Visual.show_nextturn(self.Player_Playing)
self.Player2.CreateBoats()
self.Play()
return self.Winner
#sounds
def Play(self):
self.Player_Playing = self.Player2
while self.Winner == self.EmptyPlayer:
self.Visual.drawscreen()
time.sleep(1)
self.Player_Playing = self.NextPlayer()
self.Visual.show_nextturn(self.Player_Playing)
self.Player_Playing.PlayTurn()
self.Visual.drawscreen()
time.sleep(1)
if self.Sound_enabled:
self.game_over.play()
self.Visual.DrawWinnerScreen()
############# USEABLE GAME FUNCTIONS #############
def GetPosition(self, x, y):
for Pos in self.Positions:
if Pos.X == x and Pos.Y == y:
return Pos
return self.EmptyPosition
    def GetBoat(self, x, y):
        # look a boat up by its anchor coordinates; falls back to EmptyBoat
        for LocalPlayers in self.Players:
            for boat in LocalPlayers.Boats:
                if boat.X == x and boat.Y == y:
                    return boat
        return self.EmptyBoat
############### SPECIFIC GAME FUNCTIONS ###################
def NextPlayer(self):
if self.Player_Playing == self.Player1:
return self.Player2
else:
return self.Player1
def CreatePositions(self):
print("Creating positions")
for y in range (0,20):
for x in range (0,20):
LocalPosition = class_Positions.Position(self, x, y)
self.Positions.append(LocalPosition)
def GetAllBoatPositions(self, exception): #exception is list
BoatPositions = []
BoatPositions += self.Player1.GetPlayerBoatPositions(exception) #exception
BoatPositions += self.Player2.GetPlayerBoatPositions(exception) #exception
return BoatPositions
def ToughUpdateBoats(self):
positions = self.Positions
Player1Boats = self.Player1.Boats
Player2Boats = self.Player2.Boats
for localpositions in positions:
localpositions.Boat = self.EmptyBoat
for p1boats in Player1Boats:
allboatpositions = p1boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
for p1allboats in allboatpositions:
if p1allboats.X == localpositions.X and p1allboats.Y == localpositions.Y:
localpositions.Boat = p1boats
for p2boats in Player2Boats:
allboatpositions = p2boats.GetLocalBoatsPositions(True, -1, -1, "inactive")
for p2allboats in allboatpositions:
if p2allboats.X == localpositions.X and p2allboats.Y == localpositions.Y:
localpositions.Boat = p2boats
```
#### File: Jobkanis/Battleport/class_Player.py
```python
import random
import math
import time
import copy
import pygame
import class_Boats
import class_Positions
import class_Game
class Player:
def __init__(self, gameclass, playername):
####################################################
#For educational purposes
self.Game = gameclass
Visual = self.Game.Visual
Players = self.Game.Players
EmptyPlayer= self.Game.EmptyPlayer
Player1 = self.Game.Player1
Player2 = self.Game.Player2
Positions = self.Game.Positions
EmptyPosition = self.Game.EmptyPosition
        EmptyBoat = self.Game.EmptyBoat
###################################################
self.Name = playername
self.Cards = []
self.Boats = []
def PlayTurn(self):
print("It is " + self.Name + "'s turn")
#Creating possibleaction value
#for keeping track what the user did and can still do
AvaibleBoatsToMove = []
for LocalBoat in self.Boats:
AvaibleBoatsToMove.append(LocalBoat.Name)
AvaibleAttacks_No = 2
AvaibleBoatsToAttack = []
for LocalBoat in self.Boats:
AvaibleBoatsToAttack.append(LocalBoat.Name)
#AvaiblePlayCards_No = 2
#Taking a card
#if len(self.Cards) < 7:
# ("Takecard: yet to be implemented")
#The actual possible moves the player can do (loop)
LocalDone = False
while LocalDone == False and self.Game.Winner == self.Game.EmptyPlayer:
Actions = ["boataction", "play cards", "end turn"]
################# GETTING EASY TRUE/FALSE STATEMENTS ###############################
# ATTACK AND MOVE
BoatsAbleToAttack = self.GetBoatsAbleToAttack(AvaibleBoatsToAttack) #return list of boats able to attack - boat classes!
AbleToAttack = False
if AvaibleAttacks_No > 0:
if len(BoatsAbleToAttack) > 0:
AbleToAttack = True
BoatsAbleToMove = self.GetBoatsAbleToMove(AvaibleBoatsToMove) #return list of boats able to move - boat classes!
AbleToMove = False
if len(BoatsAbleToMove) > 0:
AbleToMove = True
# BOAT SELECT
BoatsAbleForAction = []
BoatsAbleForAction += BoatsAbleToMove
for LocalBoatsAbleToAttack in BoatsAbleToAttack:
if LocalBoatsAbleToAttack not in BoatsAbleForAction:
BoatsAbleForAction.append(LocalBoatsAbleToAttack)
AbleToBoatAction = False
if len(BoatsAbleForAction) >0 :
AbleToBoatAction = True
# PLAY CARDS
#AbleToPlayCards = False
#if AvaiblePlayCards_No > 0:
# AbleToPlayCards = True
######## PHASE 1: PICKING A BOAT FOR ACTION OR CHOOSING CARD PLAY #######
if AbleToBoatAction == True: #or AbleToPlayCards == True:
#PossibleActions = ["end turn"]
#if AbleToBoatActoin == True:
# PossibleActions.append("boataction")
#if AbleToPlayCards == True:
# PossibleActions.append("play cards")
if AbleToMove == True and AbleToAttack == True:
MessageBox1Tekst = "Choose a boat to move or attack"
elif AbleToMove == True and AbleToAttack == False:
MessageBox1Tekst = "Choose a boat to move"
elif AbleToMove == False and AbleToAttack == True:
MessageBox1Tekst = "Choose a boat to attack"
MessageBox2Tekst = "Actions left: " + str(len(BoatsAbleToMove)) + " movement actions | " + str(AvaibleAttacks_No) + " potential attacks"
length = len(MessageBox1Tekst)
spacenum = 120 - length
halfspacenum = int(spacenum / 2)
MessageBox1Tekst = " " * halfspacenum + MessageBox1Tekst
length = len(MessageBox2Tekst)
spacenum = 66 - length
halfspacenum = int(spacenum / 2)
MessageBox2Tekst = " " * halfspacenum + MessageBox2Tekst
ActionPhase1 = self.Game.Visual.ChooseActionPhase1(BoatsAbleForAction, BoatsAbleToMove, BoatsAbleToAttack, MessageBox1Tekst, MessageBox2Tekst) #AvaiblePlayCards_No) #returns boatclass for boataction, returns 'play cards' or 'end turn'
if ActionPhase1 in BoatsAbleToAttack or ActionPhase1 in BoatsAbleToMove: #returned a boat to move
############# PHASE 2: PICKING A BOAT ACTION #########################
print("Boat chosen: " + ActionPhase1.Name)
if self.Game.Sound_enabled:
self.Game.ship_select_sound.play()
LocalBoat = ActionPhase1
PositionsToAttack = LocalBoat.GetPositionsInreachToAttack()
BoatsToAttack = LocalBoat.GetBoatsInReachToAttack()
AbleToMove = False
if LocalBoat.Name in AvaibleBoatsToMove:
BoatsPossibleMovement = LocalBoat.GetPossibleMovement()
if len(BoatsPossibleMovement) > 0:
AbleToMove = True
BoatsPossibleStance = LocalBoat.GetPossibleDefensiveStance()
if len(BoatsPossibleStance) > 0:
AbleToMove = True
AbleToAttackBoats = False
if AvaibleAttacks_No > 0:
if LocalBoat.Name in AvaibleBoatsToAttack:
if len(BoatsToAttack) > 0:
AbleToAttackBoats = True
CheckPoint = False
while CheckPoint == False:
MessageBox1Tekst = " " * 20 + "Choose an action (" + "Actions left: " + str(len(BoatsAbleToMove)) + " movements | " + str(AvaibleAttacks_No) + " attacks)"
MessageBox2Tekst = "Selected: " + LocalBoat.Name + " | health: " + str(LocalBoat.Health) + "/" + str(LocalBoat.MaxHealth) + " | movementrange: " + str(LocalBoat.MovementRange)
length = len(MessageBox2Tekst)
spacenum = 64 - length
halfspacenum = int(spacenum / 2)
MessageBox2Tekst = " " * halfspacenum + MessageBox2Tekst
BoatAction = self.Game.Visual.ChooseBoatActionPhase2(LocalBoat, AbleToMove, AbleToAttackBoats, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst) #returns 'attack when pressed attack, returns 'move' when pressed move, returns 'cancel' when cancled
if self.Game.Sound_enabled:
self.Game.ship_select_sound.play()
if BoatAction == "attack":
######################### PHASE 3: ATTACKING A BOAT #####################################
print("Attacking with " + LocalBoat.Name)
AvaibleAttacks_No -= 1
AvaibleBoatsToAttack.remove(LocalBoat.Name)
self.Attack(LocalBoat)
CheckPoint = True
elif BoatAction == "move":
######################### PHASE 3: MOVING A BOAT #####################################
print("Moveing " + LocalBoat.Name)
AvaibleBoatsToMove.remove(LocalBoat.Name)
self.MoveBoat(LocalBoat)
CheckPoint = True
elif BoatAction == "cancel":
print("Canceled boataction")
#move back to phase 1
CheckPoint = True
#elif ActionPhase1 == "play cards": #returned "Play cards"\
####### PHASE 2 ##############
# print("Play cards")
# AvaiblePlayCards_No
elif ActionPhase1 == "end turn": #returned "end turn"
# END TURN
print("ended turn")
LocalDone = True
else:
#TURN DONE
LocalDone = True
def CreateBoats(self):
self.Boats = []
self.Boats.append(class_Boats.Boat(self.Game, self, "size2"))
self.Boats.append(class_Boats.Boat(self.Game, self, "size31"))
self.Boats.append(class_Boats.Boat(self.Game, self, "size32"))
self.Boats.append(class_Boats.Boat(self.Game, self, "size4"))
################## ACTIONS (Phase 3) #########################
#def PlayCards(self):
# print("Choose a card to play: yet to be implemented")
# print("Play the card: yet to be implemented")
def Attack(self, Boat):
BoatPositionsAbleToAttack = Boat.GetPositionsInreachToAttack()
BoatsAbleToAttack = Boat.GetBoatsInReachToAttack()
MessageBox1Tekst = " " * 49 + "Choose a boat to attack"
MessageBox2Tekst = "Selected: " + Boat.Name + " | health: " + str(Boat.Health) + "/" + str(Boat.MaxHealth) + " | movementrange: " + str(Boat.MovementRange)
length = len(MessageBox2Tekst)
spacenum = 64 - length
halfspacenum = int(spacenum / 2)
MessageBox2Tekst = " " * halfspacenum + MessageBox2Tekst
BoatToAttack = self.Game.Visual.GetAttackActionPhase3(Boat, BoatPositionsAbleToAttack, BoatsAbleToAttack, MessageBox1Tekst, MessageBox2Tekst)
BoatToAttack.DealDamage(1)
def MoveBoat(self, Boat):
#CHOOSE MOVE ACTION TO DO
self.Game.Visual.drawscreen()
time.sleep(0.5)
AvaibleMovements = Boat.MovementRange
LocalDone = False
while AvaibleMovements > 0 and LocalDone == False:
Action = ""
MovementAction = ""
PossibleStanceActions = Boat.GetPossibleDefensiveStance()
PossibleMovementActions = Boat.GetPossibleMovement()
PositionsToAttack = Boat.GetPositionsInreachToAttack()
MessageBox1Tekst = " " * 38 + "Move your boat ("+ str(AvaibleMovements) + "/" + str(Boat.MovementRange) + " movements left)" #55
MessageBox2Tekst = "Selected: " + Boat.Name + " | health: " + str(Boat.Health) + "/" + str(Boat.MaxHealth) + " | movementrange: " + str(Boat.MovementRange)
length = len(MessageBox2Tekst)
spacenum = 64 - length
halfspacenum = int(spacenum / 2)
MessageBox2Tekst = " " * halfspacenum + MessageBox2Tekst
#MessageBox2Tekst = " " * 24 + "You have " + str(AvaibleMovements) + "/" + str(Boat.MovementRange) + " movements left"
MovementAction = self.Game.Visual.GetMovementActionPhase3(Boat, PossibleStanceActions, PossibleMovementActions, PositionsToAttack, MessageBox1Tekst, MessageBox2Tekst) #returns ["stance", "left"/"right"/"inactive"] or ["move", "left"/"right","forward","backward"] or ["stop", "stop]
if MovementAction[0] == "stance":
if self.Game.Sound_enabled:
self.Game.move_sound.play()
Action = MovementAction[1]
AvaibleMovements -= 1
Boat.ChangeBoatStance(Action)
self.Game.Visual.drawscreen()
time.sleep(0.7)
if MovementAction[0] == "move":
if self.Game.Sound_enabled:
self.Game.move_sound.play()
Action = MovementAction[1]
AvaibleMovements -= 1
AvaibleMovements = Boat.ChangeBoatPosition(Action, AvaibleMovements)
self.Game.Visual.drawscreen()
time.sleep(0.7)
if MovementAction[0] == "stop":
if self.Game.Sound_enabled:
self.Game.ship_select_sound.play()
print("Stopped moving boat")
LocalDone = True
#yet to be implemented: end reached
################### OTHER FUNCTIONS #######################
def GetBoatsAbleToAttack(self, AvaibleBoatsToAttack): #return list of boats able to attack
BoatsAbleToAttack = []
for boatname in AvaibleBoatsToAttack:
boatclass = self.GetBoatFromName(boatname)
LocalBoatsToAttack = boatclass.GetBoatsInReachToAttack()
if len(LocalBoatsToAttack) > 0:
BoatsAbleToAttack.append(boatclass)
return BoatsAbleToAttack
def GetBoatsAbleToMove(self, AvaibleBoatsToMove): #return list of boats able to move
Boats = AvaibleBoatsToMove
BoatsAbleToMove = []
for LocalBoatName in Boats:
LocalBoatClass = self.GetBoatFromName(LocalBoatName)
BoatAbleToMove = False
LocalPossibleStance = LocalBoatClass.GetPossibleDefensiveStance()
if len(LocalPossibleStance) > 0:
BoatAbleToMove = True
LocalPossibleMovement = LocalBoatClass.GetPossibleMovement()
if len(LocalPossibleMovement) > 0:
BoatAbleToMove = True
if BoatAbleToMove == True:
BoatsAbleToMove.append(LocalBoatClass)
return BoatsAbleToMove
def GetPlayerBoatPositions(self, exception):
BoatPositions = []
for PlayerBoats in self.Boats:
if PlayerBoats not in exception:
BoatPositions += PlayerBoats.GetLocalBoatsPositions(True, -1, -1, "inactive")
return BoatPositions
def GetBoatFromName(self,BoatName):
Boats = self.Boats
for LocalBoat in Boats:
if LocalBoat.Name == BoatName:
return LocalBoat
def DeleteBoat(self, Boat):
if Boat in self.Boats:
self.Boats.remove(Boat)
del Boat
if len(self.Boats) == 0:
if self == self.Game.Player1:
opponent = self.Game.Player2
elif self == self.Game.Player2:
opponent = self.Game.Player1
self.Game.Winner = opponent
``` |
{
"source": "jobkarani/carnect",
"score": 2
} |
#### File: carnect/car/forms.py
```python
from django import forms
from .models import *
from django.forms import ModelForm, DateInput
class ProfileForm(forms.ModelForm):
class Meta:
model=Profile
fields=['first_name','last_name','profile_pic','description','mobile_number','email','category']
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=['issue','issue_picture','make','model','year']
class AdviceForm(forms.ModelForm):
class Meta:
model=Advice
fields=['advice']
class SaleForm(forms.ModelForm):
class Meta:
model=Sale
fields=['picture','picture2','picture3','make','model','year','description','price','engine_size']
class ResponseForm(forms.ModelForm):
class Meta:
model=Response
widgets = {
'schedule_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),
}
fields=['responses','shop','location','schedule_time']
class EventForm(forms.ModelForm):
class Meta:
model = Event
# datetime-local is a HTML5 input type, format to make date time show on fields
widgets = {
'start_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),
'end_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),
}
fields = ['title','description','start_time','end_time']
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
# input_formats to parse HTML5 datetime-local input to datetime field
self.fields['start_time'].input_formats = ('%Y-%m-%dT%H:%M',)
self.fields['end_time'].input_formats = ('%Y-%m-%dT%H:%M',)
``` |
{
"source": "jobkarani/InstaClone",
"score": 2
} |
#### File: InstaClone/insta/models.py
```python
from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
import datetime as dt
from tinymce.models import HTMLField
# Create your models here.
# image
class Image(models.Model):
user = models.ForeignKey(User, on_delete=models.PROTECT,related_name='user_images')
image = CloudinaryField('image')
name = models.CharField(max_length=40)
caption = models.CharField(max_length=200)
posted_on = models.DateTimeField(auto_now_add=True)
liked= models.ManyToManyField(User,default=None,blank=True,related_name='liked')
comment = models.IntegerField(blank=True,null=True,default=True)
profile = models.ForeignKey(User, on_delete=models.CASCADE,null=True)
    class Meta:
ordering =['posted_on']
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_caption(self, new_caption):
self.caption = new_caption
self.save()
@classmethod
def search_by_name(cls,search_term):
posts = cls.objects.filter(name__icontains=search_term)
return posts
@property
def saved_comments(self):
return self.comments.all()
@property
def saved_likes(self):
return self.postslikes.count()
def __str__(self):
return self.name
liking={('Like','Like'),('Unlike','Unlike')}
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.PROTECT)
profile_photo = CloudinaryField('image')
bio = models.TextField(max_length=650,blank=True,null=True)
def save_profile(self):
self.save()
def delete_profile(self):
        self.delete()
def update(self):
self.save()
def __str__(self):
return self.user.username
class Comment(models.Model):
comment = models.CharField(max_length=250)
image = models.ForeignKey(Image,on_delete = models.CASCADE,related_name='comments')
user = models.ForeignKey(User,on_delete = models.CASCADE,related_name='comments')
@classmethod
def display_comment(cls,image_id):
comments = cls.objects.filter(image_id = image_id)
return comments
class Like(models.Model):
val = models.CharField(choices=liking,default='like',max_length=50)
image = models.ForeignKey(Image,on_delete = models.CASCADE)
user = models.ForeignKey(User,on_delete = models.CASCADE)
def __str__(self):
return self.val
```
#### File: InstaClone/insta/tests.py
```python
from django.test import TestCase
from .models import *
# Create your tests here.
class ProfileTestClass(TestCase):
# Set up method
def setUp(self):
"""creation of profile for testing
"""
user = User.objects.create(
username = 'ayubu',
first_name = 'ayub',
last_name = '254')
Profile.objects.create(
bio = 'hey',
profile_photo = 'static/image/travel.webp',
user_id = user.id
)
def test_bio(self):
"""tests the profiles bio
"""
profile=Profile.objects.get(bio="hey")
self.assertEqual(profile.bio, "hey")
class ImageTestCase(TestCase):
def setUp(self):
"""image creation
"""
user = User.objects.create(
username = 'ayubu',
first_name = 'ayub',
last_name = '254')
Image.objects.create(
name="init",
caption="ooops",
profile_id=user.id,
user_id=user.id
)
def test_image_name(self):
"""tests image name
"""
image=Image.objects.get(name="init")
self.assertEqual(image.name, "init")
class LikeTestCase(TestCase):
def setUp(self):
user = User.objects.create(
username = 'ayubu',
first_name = 'ayub',
last_name = '254')
Profile.objects.create(
bio = 'hey',
profile_photo = 'static/image/travel.webp',
user_id = user.id
)
Image.objects.create(
name="init",
caption="ooops",
profile_id=user.id,
user_id=user.id
)
def test_image_id(self):
user = User.objects.create(
username = 'yub',
first_name = 'yubus',
last_name = '_254')
Image.objects.create(
name="init",
caption="ooops",
profile_id=user.id,
user_id=user.id
)
``` |
{
"source": "jobkarani/news-app",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_source,get_article
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
    # Getting popular news sources
popular_sources = get_source()
# print("sources" ,popular_sources)
business_news = get_source()
sport_news = get_source()
title = 'Home - Welcome to Online News Website'
return render_template('index.html', title = title, popular = popular_sources,business = business_news,sport = sport_news )
@main.route('/articles/<source_id>')
def articles(source_id):
articles = get_article(source_id)
print(articles)
return render_template('articles.html',articles = articles)
```
#### File: news-app/tests/source_test.py
```python
import unittest
class Source:
'''
Source class to define source Objects
'''
def __init__(self,id,name,description,url,category,country):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
self.country = country
class SourceTest(unittest.TestCase):
'''
Test Class to test the behaviour of the source class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_source = Source("bbc-sport","BBC Sport","The home of BBC Sport online. Includes live sports coverage, breaking news, results, video, audio and analysis on Football, F1, Cricket, Rugby Union, Rugby League, Golf, Tennis and all the main world sports, plus major events such as the Olympic Games.","http://www.bbc.co.uk/sport ","sports","gb")
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joblackpoc/StudyJupyter",
"score": 2
} |
#### File: joblackpoc/StudyJupyter/pivottablejs.py
```python
__version__ = '2.21.0'
template = """
<!DOCTYPE html>
<html>
<head>
<title>PivotTable.js</title>
<!-- external libs from cdnjs -->
<link rel="stylesheet" type="text/css" href="%(static)s/pivot-ajax/c3.min.css">
<script type="text/javascript" src="%(static)s/pivot-ajax/jquery.min.js"></script>
<script type="text/javascript" src="%(static)s/pivot-ajax/jquery-ui.min.js"></script>
<script type="text/javascript" src="%(static)s/pivot-ajax/d3.min.js"></script>
<script type="text/javascript" src="%(static)s/pivot-ajax/jquery.csv-0.71.min.js"></script>
<script type="text/javascript" src="%(static)s/pivot-ajax/c3.min.js"></script>
<link rel="stylesheet" type="text/css" href="%(static)s/pivottable/pivot.min.css">
<script type="text/javascript" src="%(static)s/pivottable/pivot.min.js"></script>
<script type="text/javascript" src="%(static)s/pivottable/d3_renderers.min.js"></script>
<script type="text/javascript" src="%(static)s/pivottable/c3_renderers.min.js"></script>
<script type="text/javascript" src="%(static)s/pivottable/export_renderers.min.js"></script>
<style>
body {font-family: Verdana;}
.node {
border: solid 1px white;
font: 10px sans-serif;
line-height: 12px;
overflow: hidden;
position: absolute;
text-indent: 2px;
}
.c3-line, .c3-focused {stroke-width: 3px !important;}
.c3-bar {stroke: white !important; stroke-width: 1;}
.c3 text { font-size: 12px; color: grey;}
.tick line {stroke: white;}
.c3-axis path {stroke: grey;}
.c3-circle { opacity: 1 !important; }
</style>
</head>
<body>
<script type="text/javascript">
$(function(){
if(window.location != window.parent.location)
$("<a>", {target:"_blank", href:""})
.text("[pop out]").prependTo($("body"));
$("#output").pivotUI(
$.csv.toArrays($("#output").text()),
{
renderers: $.extend(
$.pivotUtilities.renderers,
$.pivotUtilities.c3_renderers,
$.pivotUtilities.d3_renderers,
$.pivotUtilities.export_renderers
),
hiddenAttributes: [""]
}
).show();
});
</script>
<div id="output" style="display: none;">%(div)s</div>
</body>
</html>
"""
from IPython.display import IFrame
def pivot_ui(df, outfile_path = "pivottablejs.html", width="100%", height="500"):
# FIXME: we shouldn't hard-code the port here
port = 8888 # get_config().NotebookApp.port
static_path = 'http://localhost:%s' % port
with open(outfile_path, 'w') as outfile:
outfile.write(template % {'div': df.to_csv(),
'static': '%s/static' % static_path})
return IFrame(src=outfile_path, width=width, height=height)
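# Usage sketch (assumes a pandas DataFrame `df`; pandas itself is not imported
# by this module). Run inside a notebook served on the hard-coded port above:
#
# import pandas as pd
# df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
# pivot_ui(df, outfile_path="pivottablejs.html")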
``` |
{
"source": "jobliz/solid-state-kinetics",
"score": 3
} |
#### File: ssk/alpha/api.py
```python
from __future__ import division
import numpy as np
from scipy import integrate
__all__ = ['area', 'simple']
def simple(p):
pass
def area(p):
cumul = np.hstack(([0], integrate.cumtrapz(np.abs(np.gradient(p)))))
return cumul / max(cumul)
```
#### File: ssk/simulation/api.py
```python
from __future__ import division
import math
import itertools
import functools
import numpy as np
from scipy.constants import R
from scipy.optimize import minimize
from scipy.integrate import ode
from ..ti import senumyang
__all__ = [
'psi',
'single_isothermal',
'single_nonisothermal',
'ni_rates',
'ni_integrate'
]
def psi(T, alphas, rate, A, Ea, g=None):
"""
    Objective function for a single-step linear nonisothermal simulation.
Parameters
----------
T : int or float
Absolute temperature (this will be the optimized value)
alphas : ndarray
Transformed fraction values.
rate : float or int
Linear heating rate.
A : float or int
Pre-exponential factor.
    Ea : float or int
Activation energy (J/mol)
Returns
-------
Single numerical value to be optimized. The closer this value is to zero,
the better.
Notes
-----
Uses the Senum-Yang approximation for calculating the temperature
integral. Set g keyword argument with functools.partial.
"""
x = Ea/(R*T)
return np.abs((g(alphas) * (rate*R)/(A*Ea)) - senumyang(x))
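# Usage sketch (illustrative only): bind the kinetic model with functools.partial
# as the docstring suggests, then evaluate the residual at a trial temperature.
# `g_model` stands for any integral model g(alpha) and is not defined here.
#
# objfun = functools.partial(psi, g=g_model)
# objfun(650.0, 0.5, 10.0, 1e15, 150e3)   # T, alpha, rate, A, Ea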
def single_isothermal(model, A, E, alphas, T):
"""
Simulates isothermal curves from a single model and given parameters
Parameters
----------
model : callable
Integral form of a kinetic model.
A : float or int
Pre-exponential factor.
E : float or int
Activation energy (J/mol)
alphas : ndarray
Transformed fraction values.
T : int or float
Absolute temperature.
Returns
-------
A list of simulated time steps sequentially associated with given
    transformation fraction values.
"""
    return model(alphas) / A * np.exp(E/(R*T))
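# Usage sketch (placeholder kinetic parameters; `g_model` is any integral model
# and is not defined in this module):
#
# alphas = np.linspace(0.05, 0.95, 19)
# t = single_isothermal(g_model, A=1e15, E=150e3, alphas=alphas, T=650.0)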
def single_nonisothermal(model, A, E, alphas, rate, T0=500, method="Nelder-Mead"):
"""
Simulate a linear nonisothermal curve from a single model and given parameters
with an optimization procedure.
Parameters
----------
model : callable
Integral form of a kinetic model.
A : float or int
Pre-exponential factor.
E : float or int
Activation energy (J/mol)
alphas : ndarray
Transformed fraction values.
rate : float or int
Linear heating rate.
T0 : float or int
Initial temperature guess.
method : str or callable
Proxy for optimize.minimize(method=value)
Returns
-------
A list of simulated temperatures sequentially associated with given
    transformation fraction values.
References
----------
#TODO
"""
objfun = functools.partial(psi, g=model)
output = [minimize(objfun, T0, args=(a, rate, A, E), method=method) for a in alphas]
return [o.x for o in output]
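# Usage sketch (placeholder values): simulate the temperatures at which the given
# transformed fractions are reached under a linear heating rate; `g_model` is any
# integral kinetic model g(alpha).
#
# alphas = np.linspace(0.05, 0.95, 19)
# temps = single_nonisothermal(g_model, A=1e15, E=150e3, alphas=alphas, rate=10.0)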
def ni_rates(*args):
"""
Calculates non-isothermal rate constants. Parameter format is
[b, T, A1, E1, A2, E2, A3, E3...]
Parameters (*args) all int or float
----------------------------------
[0] : Heating rate (b)
[1] : Actual temperature
[n] : Pre-exponential factor
[n+1] : Activation Energy
Returns
-------
k : iterable
List of rate constants for given non-isothermal step
"""
b, T, A, E = args[0], args[1], [], []
cycle = itertools.cycle([A, E])
for arg in args[2:]:
cycle.next().append(arg)
K = []
for n, _ in enumerate(A):
K.append(A[n]/b * math.exp(-E[n]/(R*T)))
return K
def ni_integrate(func, T0, args=None, dT=1, T1=None, verbose=False):
"""
Integrate a non-isothermal composite kinetic model.
Parameters
----------
func : callable
Model function
T0 : int or float
        Starting temperature (in Kelvins)
args : iterable
Heating rate followed by A, E pairs.
dT : int or float
Temperature step size
T1 : int or float (optional)
Force final simulation temperature to this value.
verbose : boolean (optional)
Print results from every integration step
Returns
-------
temps : iterable
Temperature list.
alphas: iterable
Transformation fraction list.
"""
    n = int(math.ceil(len(args[2:]) / 2)) + 1
r = ode(func).set_integrator('zvode', method='bdf', with_jacobian=False)
r.set_initial_value(np.zeros(n), T0).set_f_params(*args)
temps, alphas = [], []
while r.successful():
r.integrate(r.t+dT)
if verbose:
print r.t, r.y
# work until alpha == 1
if not T1:
if r.y[n-1] < 1: # last y position should be total transformed fraction
temps.append(r.t)
alphas.append(r.y[n-1])
else:
break
# work until given last temperature (long graph style)
else:
if r.t < T1:
temps.append(r.t)
alphas.append(r.y[n-1])
else:
break
return temps, alphas
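# Usage sketch (illustrative): `composite_model` is a user-supplied right-hand
# side with the signature the ODE solver expects here, f(T, y, b, A1, E1, ...);
# it is an assumption and not defined in this module.
#
# temps, alphas = ni_integrate(composite_model, T0=400,
#                              args=(10.0, 1e15, 150e3), dT=1)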
``` |
{
"source": "joblocal/certbot-route53-plugin",
"score": 3
} |
#### File: certbot-route53-plugin/certbot_route53/authenticator.py
```python
import logging
import time
import zope.interface
import boto3
from acme import challenges
from certbot import interfaces
from certbot.plugins import common
logger = logging.getLogger(__name__)
TTL = 10
class Authenticator(common.Plugin):
zope.interface.implements(interfaces.IAuthenticator)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Route53 Authenticator"
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self._httpd = None
self.r53 = boto3.client('route53')
def prepare(self): # pylint: disable=missing-docstring,no-self-use
pass # pragma: no cover
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return ("")
def get_chall_pref(self, domain):
# pylint: disable=missing-docstring,no-self-use,unused-argument
return [challenges.DNS01]
def perform(self, achalls): # pylint: disable=missing-docstring
responses = []
last = len(achalls) - 1
for i, achall in enumerate(achalls):
self._perform_single(achall, (i == last))
for achall in achalls:
responses.append(self._validate_single(achall))
return responses
def _find_zone(self, domain):
return max(
(
zone for zone in self.r53.list_hosted_zones()["HostedZones"]
if (domain+".").endswith("."+zone["Name"]) or (domain+".") == (zone["Name"])
),
key=lambda zone: len(zone["Name"]),
)
def _perform_single(self, achall, wait_for_change=False):
        # provision the TXT record, using the domain name given. Assumes the hosted zone exists, else fails the challenge
logger.info("Adding challange to " + achall.domain)
try:
zone = self._find_zone(achall.domain)
except ValueError as e:
logger.error("Unable to find matching Route53 zone for domain " + achall.domain)
return None
_, validation = achall.response_and_validation()
self._excute_r53_action(achall, zone, validation, 'UPSERT', wait_for_change)
def _validate_single(self, achall):
        # verify that the TXT record for the domain has propagated; fail the challenge if verification does not succeed
logger.info("Doing validation for " + achall.domain)
response, _ = achall.response_and_validation()
for _ in xrange(TTL*6):
if response.simple_verify(
achall.chall,
achall.domain,
achall.account_key.public_key(),
):
break
logger.info("Waiting for DNS propagation of " + achall.domain + "...")
time.sleep(1)
else:
logger.error("Unable to verify domain " + achall.domain)
return None
return response
def cleanup(self, achalls):
# pylint: disable=missing-docstring
for achall in achalls:
try:
zone = self._find_zone(achall.domain)
except ValueError:
logger.warn("Unable to find zone for " + achall.domain + ". Skipping cleanup.")
continue
_, validation = achall.response_and_validation()
self._excute_r53_action(achall, zone, validation, 'DELETE')
return None
def _excute_r53_action(self, achall, zone, validation, action, wait_for_change=False):
response = self.r53.change_resource_record_sets(
HostedZoneId=zone["Id"],
ChangeBatch={
'Comment': 'Let\'s Encrypt ' + action,
'Changes': [
{
'Action': action,
'ResourceRecordSet': {
'Name': achall.validation_domain_name(achall.domain),
'Type': 'TXT',
'TTL': TTL,
'ResourceRecords': [
{
'Value': '"' + validation + '"',
},
],
},
},
],
},
)
if wait_for_change:
while self.r53.get_change(Id=response["ChangeInfo"]["Id"])["ChangeInfo"]["Status"] == "PENDING":
logger.info("Waiting for " + action + " to propagate...")
time.sleep(1)
``` |
{
"source": "job/NIPAP",
"score": 2
} |
#### File: nipapwww/controllers/auth.py
```python
import logging
import sys
import os
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from nipapwww.lib.base import BaseController, render
from nipap.authlib import AuthFactory, AuthError
from nipap.nipapconfig import NipapConfig
from ConfigParser import NoOptionError
log = logging.getLogger(__name__)
class AuthController(BaseController):
""" Deals with authentication.
"""
requires_auth = False
def login(self):
""" Show login form.
"""
if request.method != 'POST':
cfg = NipapConfig()
try:
c.welcome_message = cfg.get('www', 'welcome_message')
except NoOptionError:
pass
return render('login.html')
# Verify username and password.
auth_fact = AuthFactory()
auth = auth_fact.get_auth(request.params.get('username'), request.params.get('password'), '<PASSWORD>')
if not auth.authenticate():
c.error = 'Invalid username or password'
return render('login.html')
# Mark user as logged in
session['user'] = auth.username
session['full_name'] = auth.full_name
session['readonly'] = auth.readonly
session['current_vrfs'] = {}
session.save()
# Send user back to the page he originally wanted to get to
if session.get('path_before_login'):
redirect(session['path_before_login'])
else:
# if previous target is unknown just send the user to a welcome page
redirect(url(controller='prefix', action='list'))
def logout(self):
""" Log out the user and display a confirmation message.
"""
# remove session
session.delete()
return render('login.html')
```
#### File: nipapwww/controllers/xhr.py
```python
import logging
try:
import json
except ImportError:
import simplejson as json
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from nipapwww.lib.base import BaseController, render
from pynipap import Tag, VRF, Prefix, Pool, NipapError
log = logging.getLogger(__name__)
class XhrController(BaseController):
""" Interface to a few of the NIPAP API functions.
"""
@classmethod
def extract_prefix_attr(cls, req):
""" Extract prefix attributes from arbitary dict.
"""
# TODO: add more?
attr = {}
if 'id' in request.params:
attr['id'] = int(request.params['id'])
if 'prefix' in request.params:
attr['prefix'] = request.params['prefix']
if 'pool' in request.params:
attr['pool'] = { 'id': int(request.params['pool']) }
if 'node' in request.params:
attr['node'] = request.params['node']
if 'type' in request.params:
attr['type'] = request.params['type']
if 'country' in request.params:
attr['country'] = request.params['country']
if 'indent' in request.params:
attr['indent'] = request.params['indent']
return attr
@classmethod
def extract_pool_attr(cls, req):
""" Extract pool attributes from arbitary dict.
"""
attr = {}
if 'id' in request.params:
attr['id'] = int(request.params['id'])
if 'name' in request.params:
attr['name'] = request.params['name']
if 'description' in request.params:
attr['description'] = request.params['description']
if 'default_type' in request.params:
attr['default_type'] = request.params['default_type']
if 'ipv4_default_prefix_length' in request.params:
attr['ipv4_default_prefix_length'] = int(request.params['ipv4_default_prefix_length'])
if 'ipv6_default_prefix_length' in request.params:
attr['ipv6_default_prefix_length'] = int(request.params['ipv6_default_prefix_length'])
return attr
def list_vrf(self):
""" List VRFs and return JSON encoded result.
"""
try:
vrfs = VRF.list()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(vrfs, cls=NipapJSONEncoder)
def smart_search_vrf(self):
""" Perform a smart VRF search.
The "smart" search function tries extract a query from
a text string. This query is then passed to the search_vrf
function, which performs the search.
"""
search_options = {}
extra_query = None
if 'query_id' in request.params:
search_options['query_id'] = request.params['query_id']
if 'max_result' in request.params:
search_options['max_result'] = request.params['max_result']
if 'offset' in request.params:
search_options['offset'] = request.params['offset']
if 'vrf_id' in request.params:
extra_query = {
'val1': 'id',
'operator': 'equals',
'val2': request.params['vrf_id']
}
try:
result = VRF.smart_search(request.params['query_string'],
search_options, extra_query
)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder)
def add_vrf(self):
""" Add a new VRF to NIPAP and return its data.
"""
v = VRF()
if 'rt' in request.params:
if request.params['rt'].strip() != '':
v.rt = request.params['rt'].strip()
if 'name' in request.params:
if request.params['name'].strip() != '':
v.name = request.params['name'].strip()
if 'description' in request.params:
v.description = request.params['description']
if 'tags' in request.params:
v.tags = json.loads(request.params['tags'])
try:
v.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(v, cls=NipapJSONEncoder)
def edit_vrf(self, id):
""" Edit a VRF.
"""
try:
v = VRF.get(int(id))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'rt' in request.params:
if request.params['rt'].strip() != '':
v.rt = request.params['rt'].strip()
else:
v.rt = None
if 'name' in request.params:
if request.params['name'].strip() != '':
v.name = request.params['name'].strip()
else:
v.name = None
if 'description' in request.params:
v.description = request.params['description']
if 'tags' in request.params:
v.tags = json.loads(request.params['tags'])
try:
v.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(v, cls=NipapJSONEncoder)
def remove_vrf(self):
""" Remove a VRF.
"""
try:
vrf = VRF.get(int(request.params['id']))
vrf.remove()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(vrf, cls=NipapJSONEncoder)
def list_pool(self):
""" List pools and return JSON encoded result.
"""
# fetch attributes from request.params
attr = XhrController.extract_pool_attr(request.params)
try:
pools = Pool.list(attr)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(pools, cls=NipapJSONEncoder)
def smart_search_pool(self):
""" Perform a smart pool search.
The "smart" search function tries extract a query from
a text string. This query is then passed to the search_pool
function, which performs the search.
"""
search_options = {}
if 'query_id' in request.params:
search_options['query_id'] = request.params['query_id']
if 'max_result' in request.params:
search_options['max_result'] = request.params['max_result']
if 'offset' in request.params:
search_options['offset'] = request.params['offset']
try:
result = Pool.smart_search(request.params['query_string'],
search_options
)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder)
def add_pool(self):
""" Add a pool.
"""
# extract attributes
p = Pool()
p.name = request.params.get('name')
p.description = request.params.get('description')
p.default_type = request.params.get('default_type')
if 'ipv4_default_prefix_length' in request.params:
if request.params['ipv4_default_prefix_length'].strip() != '':
p.ipv4_default_prefix_length = request.params['ipv4_default_prefix_length']
if 'ipv6_default_prefix_length' in request.params:
if request.params['ipv6_default_prefix_length'].strip() != '':
p.ipv6_default_prefix_length = request.params['ipv6_default_prefix_length']
if 'tags' in request.params:
p.tags = json.loads(request.params['tags'])
try:
p.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder)
def edit_pool(self, id):
""" Edit a pool.
"""
# extract attributes
p = Pool.get(int(id))
if 'name' in request.params:
p.name = request.params.get('name')
if 'description' in request.params:
p.description = request.params.get('description')
if 'default_type' in request.params:
p.default_type = request.params.get('default_type')
if 'ipv4_default_prefix_length' in request.params:
if request.params['ipv4_default_prefix_length'].strip() != '':
p.ipv4_default_prefix_length = request.params['ipv4_default_prefix_length']
else:
p.ipv4_default_prefix_length = None
if 'ipv6_default_prefix_length' in request.params:
if request.params['ipv6_default_prefix_length'].strip() != '':
p.ipv6_default_prefix_length = request.params['ipv6_default_prefix_length']
else:
p.ipv6_default_prefix_length = None
if 'tags' in request.params:
p.tags = json.loads(request.params['tags'])
try:
p.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder)
def remove_pool(self):
""" Remove a pool.
"""
try:
pool = Pool.get(int(request.params['id']))
pool.remove()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(pool, cls=NipapJSONEncoder)
def list_prefix(self):
""" List prefixes and return JSON encoded result.
"""
# fetch attributes from request.params
attr = XhrController.extract_prefix_attr(request.params)
try:
prefixes = Prefix.list(attr)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(prefixes, cls=NipapJSONEncoder)
def search_prefix(self):
""" Search prefixes. Does not yet incorporate all the functions of the
search_prefix API function due to difficulties with transferring
a complete 'dict-to-sql' encoded data structure.
Instead, a list of prefix attributes can be given which will be
            matched with the 'equals' operator if nothing else is specified. If
multiple attributes are given, they will be combined with the 'and'
operator. Currently, it is not possible to specify different
operators for different attributes.
"""
# extract operator
if 'operator' in request.params:
operator = request.params['operator']
else:
operator = 'equals'
# fetch attributes from request.params
attr = XhrController.extract_prefix_attr(request.params)
# build query dict
n = 0
q = {}
for key, val in attr.items():
if n == 0:
q = {
'operator': operator,
'val1': key,
'val2': val
}
else:
q = {
'operator': 'and',
'val1': {
'operator': operator,
'val1': key,
'val2': val
},
'val2': q
}
n += 1
# extract search options
search_opts = {}
if 'children_depth' in request.params:
search_opts['children_depth'] = request.params['children_depth']
if 'parents_depth' in request.params:
search_opts['parents_depth'] = request.params['parents_depth']
if 'include_neighbors' in request.params:
search_opts['include_neighbors'] = request.params['include_neighbors']
if 'max_result' in request.params:
search_opts['max_result'] = request.params['max_result']
if 'offset' in request.params:
search_opts['offset'] = request.params['offset']
try:
result = Prefix.search(q, search_opts)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder)
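    # Worked example (illustrative): if two attributes are extracted, say a key
    # 'type' with value 'assignment' followed by a key 'node', the loop above
    # nests the second 'equals' clause as 'val1' of an 'and' whose 'val2' is the
    # clause built for the first attribute:
    #
    # q = {
    #     'operator': 'and',
    #     'val1': {'operator': 'equals', 'val1': 'node', 'val2': '<node>'},
    #     'val2': {'operator': 'equals', 'val1': 'type', 'val2': 'assignment'},
    # }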
def smart_search_prefix(self):
""" Perform a smart search.
            The smart search function tries to extract a query from
a text string. This query is then passed to the search_prefix
function, which performs the search.
"""
search_options = {}
extra_query = None
vrf_filter = None
if 'query_id' in request.params:
search_options['query_id'] = request.params['query_id']
if 'include_all_parents' in request.params:
if request.params['include_all_parents'] == 'true':
search_options['include_all_parents'] = True
else:
search_options['include_all_parents'] = False
if 'include_all_children' in request.params:
if request.params['include_all_children'] == 'true':
search_options['include_all_children'] = True
else:
search_options['include_all_children'] = False
if 'parents_depth' in request.params:
search_options['parents_depth'] = request.params['parents_depth']
if 'children_depth' in request.params:
search_options['children_depth'] = request.params['children_depth']
if 'include_neighbors' in request.params:
if request.params['include_neighbors'] == 'true':
search_options['include_neighbors'] = True
else:
search_options['include_neighbors'] = False
if 'max_result' in request.params:
search_options['max_result'] = request.params['max_result']
if 'offset' in request.params:
search_options['offset'] = request.params['offset']
if 'parent_prefix' in request.params:
search_options['parent_prefix'] = request.params['parent_prefix']
if 'vrf_filter[]' in request.params:
vrf_filter_parts = []
# Fetch VRF IDs from search query and build extra query dict for
# smart_search_prefix.
vrfs = request.params.getall('vrf_filter[]')
if len(vrfs) > 0:
vrf = vrfs[0]
vrf_filter = {
'operator': 'equals',
'val1': 'vrf_id',
'val2': vrf if vrf != 'null' else None
}
for vrf in vrfs[1:]:
vrf_filter = {
'operator': 'or',
'val1': vrf_filter,
'val2': {
'operator': 'equals',
'val1': 'vrf_id',
'val2': vrf if vrf != 'null' else None
}
}
if vrf_filter:
extra_query = vrf_filter
if 'indent' in request.params:
if extra_query:
extra_query = {
'operator': 'and',
'val1': extra_query,
'val2': {
'operator': 'equals',
'val1': 'indent',
'val2': request.params['indent']
}
}
else:
extra_query = {
'operator': 'equals',
'val1': 'indent',
'val2': request.params['indent']
}
try:
result = Prefix.smart_search(request.params['query_string'],
search_options, extra_query)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(result, cls=NipapJSONEncoder)
def add_prefix(self):
""" Add prefix according to the specification.
The following keys can be used:
vrf ID of VRF to place the prefix in
prefix the prefix to add if already known
family address family (4 or 6)
description A short description
comment Longer comment
node Hostname of node
type Type of prefix; reservation, assignment, host
status Status of prefix; assigned, reserved, quarantine
pool ID of pool
country Country where the prefix is used
order_id Order identifier
customer_id Customer identifier
vlan VLAN ID
alarm_priority Alarm priority of prefix
monitor If the prefix should be monitored or not
from-prefix A prefix the prefix is to be allocated from
from-pool A pool (ID) the prefix is to be allocated from
prefix_length Prefix length of allocated prefix
"""
p = Prefix()
# Sanitize input parameters
if 'vrf' in request.params:
try:
if request.params['vrf'] is None or len(request.params['vrf']) == 0:
p.vrf = None
else:
p.vrf = VRF.get(int(request.params['vrf']))
except ValueError:
return json.dumps({'error': 1, 'message': "Invalid VRF ID '%s'" % request.params['vrf']})
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'description' in request.params:
if request.params['description'].strip() != '':
p.description = request.params['description'].strip()
if 'comment' in request.params:
if request.params['comment'].strip() != '':
p.comment = request.params['comment'].strip()
if 'node' in request.params:
if request.params['node'].strip() != '':
p.node = request.params['node'].strip()
if 'status' in request.params:
p.status = request.params['status'].strip()
if 'type' in request.params:
p.type = request.params['type'].strip()
if 'pool' in request.params:
if request.params['pool'].strip() != '':
try:
p.pool = Pool.get(int(request.params['pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'country' in request.params:
if request.params['country'].strip() != '':
p.country = request.params['country'].strip()
if 'order_id' in request.params:
if request.params['order_id'].strip() != '':
p.order_id = request.params['order_id'].strip()
if 'customer_id' in request.params:
if request.params['customer_id'].strip() != '':
p.customer_id = request.params['customer_id'].strip()
if 'alarm_priority' in request.params:
p.alarm_priority = request.params['alarm_priority'].strip()
if 'monitor' in request.params:
if request.params['monitor'] == 'true':
p.monitor = True
else:
p.monitor = False
if 'vlan' in request.params:
if request.params['vlan'].strip() != '':
p.vlan = request.params['vlan']
if 'tags' in request.params:
p.tags = json.loads(request.params['tags'])
# arguments
args = {}
if 'from_prefix[]' in request.params:
args['from-prefix'] = request.params.getall('from_prefix[]')
if 'from_pool' in request.params:
try:
args['from-pool'] = Pool.get(int(request.params['from_pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'family' in request.params:
args['family'] = request.params['family']
if 'prefix_length' in request.params:
args['prefix_length'] = request.params['prefix_length']
# manual allocation?
if args == {}:
if 'prefix' in request.params:
p.prefix = request.params['prefix']
try:
p.save(args)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder)
def edit_prefix(self, id):
""" Edit a prefix.
"""
try:
p = Prefix.get(int(id))
# extract attributes
if 'prefix' in request.params:
p.prefix = request.params['prefix']
if 'type' in request.params:
p.type = request.params['type'].strip()
if 'description' in request.params:
if request.params['description'].strip() == '':
p.description = None
else:
p.description = request.params['description'].strip()
if 'comment' in request.params:
if request.params['comment'].strip() == '':
p.comment = None
else:
p.comment = request.params['comment'].strip()
if 'node' in request.params:
if request.params['node'].strip() == '':
p.node = None
else:
p.node = request.params['node'].strip()
if 'status' in request.params:
p.status = request.params['status'].strip()
if 'pool' in request.params:
if request.params['pool'].strip() == '':
p.pool = None
else:
try:
p.pool = Pool.get(int(request.params['pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'alarm_priority' in request.params:
p.alarm_priority = request.params['alarm_priority'].strip()
if 'monitor' in request.params:
if request.params['monitor'] == 'true':
p.monitor = True
else:
p.monitor = False
if 'country' in request.params:
if request.params['country'].strip() == '':
p.country = None
else:
p.country = request.params['country'].strip()
if 'order_id' in request.params:
if request.params['order_id'].strip() == '':
p.order_id = None
else:
p.order_id = request.params['order_id'].strip()
if 'customer_id' in request.params:
if request.params['customer_id'].strip() == '':
p.customer_id = None
else:
p.customer_id = request.params['customer_id'].strip()
if 'vrf' in request.params:
try:
if request.params['vrf'] is None or len(request.params['vrf']) == 0:
p.vrf = None
else:
p.vrf = VRF.get(int(request.params['vrf']))
except ValueError:
return json.dumps({'error': 1, 'message': "Invalid VRF ID '%s'" % request.params['vrf']})
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'vlan' in request.params:
if request.params['vlan'].strip() != '':
p.vlan = request.params['vlan']
if 'tags' in request.params:
p.tags = json.loads(request.params['tags'])
p.save()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder)
def remove_prefix(self):
""" Remove a prefix.
"""
try:
p = Prefix.get(int(request.params['id']))
p.remove()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder)
def add_current_vrf(self):
""" Add VRF to filter list session variable
"""
vrf_id = request.params.get('vrf_id')
if vrf_id is not None:
if vrf_id == 'null':
vrf = VRF()
else:
vrf = VRF.get(int(vrf_id))
session['current_vrfs'][vrf_id] = { 'id': vrf.id, 'rt': vrf.rt,
'name': vrf.name, 'description': vrf.description }
session.save()
return json.dumps(session.get('current_vrfs', {}))
def del_current_vrf(self):
""" Remove VRF to filter list session variable
"""
vrf_id = request.params.get('vrf_id')
if vrf_id in session['current_vrfs']:
del session['current_vrfs'][vrf_id]
session.save()
return json.dumps(session.get('current_vrfs', {}))
def get_current_vrfs(self):
""" Return VRF filter list from session variable
"""
return json.dumps(session.get('current_vrfs', {}))
def list_tags(self):
""" List Tags and return JSON encoded result.
"""
try:
tags = Tags.list()
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(tags, cls=NipapJSONEncoder)
class NipapJSONEncoder(json.JSONEncoder):
""" A class used to encode NIPAP objects to JSON.
"""
def default(self, obj):
if isinstance(obj, Tag):
return {
'name': obj.name
}
elif isinstance(obj, VRF):
return {
'id': obj.id,
'rt': obj.rt,
'name': obj.name,
'description': obj.description,
'tags': obj.tags
}
elif isinstance(obj, Pool):
if obj.vrf is None:
vrf_id = None
vrf_rt = None
else:
vrf_id = obj.vrf.id
vrf_rt = obj.vrf.rt
return {
'id': obj.id,
'name': obj.name,
'vrf_rt': vrf_rt,
'vrf_id': vrf_id,
'description': obj.description,
'default_type': obj.default_type,
'ipv4_default_prefix_length': obj.ipv4_default_prefix_length,
'ipv6_default_prefix_length': obj.ipv6_default_prefix_length,
'tags': obj.tags
}
elif isinstance(obj, Prefix):
if obj.pool is None:
pool = None
else:
pool = obj.pool.id
vrf_id = obj.vrf.id
vrf_rt = obj.vrf.rt
return {
'id': obj.id,
'family': obj.family,
'vrf_rt': vrf_rt,
'vrf_id': vrf_id,
'prefix': obj.prefix,
'display_prefix': obj.display_prefix,
'description': obj.description,
'comment': obj.comment,
'inherited_tags': obj.inherited_tags,
'tags': obj.tags,
'node': obj.node,
'pool': pool,
'type': obj.type,
'indent': obj.indent,
'country': obj.country,
'order_id': obj.order_id,
'customer_id': obj.customer_id,
'authoritative_source': obj.authoritative_source,
'monitor': obj.monitor,
'alarm_priority': obj.alarm_priority,
'display': obj.display,
'match': obj.match,
'children': obj.children,
'vlan': obj.vlan
}
else:
return json.JSONEncoder.default(self, obj)
``` |
{
"source": "Jobo42/exercises",
"score": 4
} |
#### File: exercises/anagram solver/anagram_detector.py
```python
def detect_anagrams(parent, child):
anagrams = []
    for i in range(len(parent) - len(child) + 1):  # + 1 so the final window is also checked
letter = parent[i]
if letter in child:
word = parent[i:i+len(child)]
if is_anagram(word, child):
anagrams.append(word)
return anagrams
# Strings must be the same length
def is_anagram(s1, s2):
for letter in s1:
if s1.count(letter) != s2.count(letter):
return False
return True
phrase = "AdnBndAndBdaBn"
key = "dAn"
anagrams = detect_anagrams(phrase, key)
print(anagrams)
``` |
{
"source": "J-Obog/bball-sim",
"score": 3
} |
#### File: bball-sim/bball/player.py
```python
from random import randint
import bball.constants as C
class Player:
def __init__(self, name, pos, ratings):
self.name = name
self.pos = pos
self.ratings = ratings
self.stats = [0 for s in range(len(C.PLAYER_STATS))]
``` |
{
"source": "J-Obog/circuit-sim",
"score": 3
} |
#### File: circuit-sim/csim/base_component.py
```python
from typing import Tuple
class BaseComponent:
def __init__(self, x: int, y: int, w: int, h: int, label: str):
self._x: int = x
self._y: int = y
self._w: int = w
self._h: int = h
self._label: str = label
def setBounds(self, w: int, h: int):
self._w = w
self._h = h
def getBounds(self) -> Tuple[int, int]:
return (self._w, self._h)
def setPos(self, x: int, y: int):
self._x = x
self._y = y
def getPos(self) -> Tuple[int, int]:
return (self._x, self._y)
def setLabel(self, label: str):
self._label = label
def getLabel(self) -> str:
return self._label
```
#### File: J-Obog/circuit-sim/main.py
```python
from PyQt5.QtWidgets import QApplication, QWidget
import sys
def main():
app = QApplication(sys.argv)
win = QWidget()
win.resize(500, 500)
win.setWindowTitle('Main App')
win.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
``` |
{
"source": "J-Obog/linreg-model",
"score": 3
} |
#### File: J-Obog/linreg-model/simplelearn.py
```python
import numpy as np
class LinReg_Model:
    def __init__(self, iterations=1000, learning_rate=0.01):
        self.__weights = None
        self.__learning_rate = learning_rate
        self.__iterations = iterations
    #tweaking weights with gradient descent
    def train(self, X, Y):
        X, Y = (np.asarray(X, dtype=float), np.asarray(Y, dtype=float))
        m, n = X.shape
        self.__weights = np.ones(n + 1)
        #append a bias column so the last weight acts as the intercept
        X = np.hstack((X, np.ones((m, 1))))
        #the original loop body was a placeholder; filled in here with a
        #standard batch gradient-descent step on the mean-squared error
        for itr in range(self.__iterations):
            predictions = X.dot(self.__weights)
            gradient = (2.0 / m) * X.T.dot(predictions - Y)
            self.__weights -= self.__learning_rate * gradient
        return self.__weights
    #test model with inputs
    def predict(self, X):
        X = np.asarray(X, dtype=float)
        #apply the same bias column used during training
        X = np.hstack((X, np.ones((X.shape[0], 1))))
        return np.array([np.dot(self.__weights, x) for x in X])
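# Minimal usage sketch (synthetic data; assumes the batch gradient-descent loop
# sketched in train() above):
#     X = [[0.], [1.], [2.], [3.]]
#     Y = [1., 3., 5., 7.]            # y = 2x + 1
#     model = LinReg_Model(iterations=5000, learning_rate=0.05)
#     model.train(X, Y)
#     model.predict([[4.]])           # expected to be close to [9.]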
``` |
{
"source": "J-Obog/littlekv",
"score": 2
} |
#### File: lkv/cli/run.py
```python
from socketio.exceptions import ConnectionError
from lkv.flags import parse_cli_args
import socketio
import time
def response_to_stdout(*args):
res = args[0]
err = args[1]
msg = res if not err else err
print(msg)
def main():
args = parse_cli_args()
host = args.host
port = args.port
command = args.command
op = command[0]
params = command[1:]
try:
client = socketio.Client()
client.connect(f'ws://{host}:{port}')
client.emit('lkv:command', {'cmd': op, 'params': params}, callback=response_to_stdout)
time.sleep(0.5)
client.disconnect()
except ConnectionError:
print(f'Unable to connect to host {host} on port {port}')
exit(0)
if __name__ == '__main__':
main()
```
#### File: lkv/store/__init__.py
```python
from typing import Any, Dict, List, Optional, Union
import os
import toml
import gzip
class KVStore:
def __init__(self, path: str, filename: str):
self.__fp: str = os.path.join(path, filename)
        self.__kv: Dict[str, Any] = {}
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(self.__fp):
open(self.__fp, 'wb+').close()
else:
self.__read_keys()
""" Storage interface """
def __read_keys(self):
with open(self.__fp, 'rb') as f:
bytes_decompressed = gzip.decompress(f.read())
content = bytes_decompressed.decode('utf-8')
self.__kv = toml.loads(content)
def __write_keys(self):
with open(self.__fp, 'wb') as f:
content = toml.dumps(self.__kv)
bytes_compressed = gzip.compress(content.encode('utf-8'))
f.write(bytes_compressed)
""" API """
    def getk(self, key=None) -> Union[Optional[Any], List[str]]:
return self.__kv.get(key, None) if key else list(self.__kv.keys())
def setk(self, key, val):
self.__kv[key] = val
self.__write_keys()
def delk(self, key):
        if self.__kv.get(key, None) is not None:
self.__kv.pop(key)
self.__write_keys()
def countk(self) -> int:
return len(self.__kv.keys())
def cleark(self):
self.__kv.clear()
self.__write_keys()
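# Minimal usage sketch (the directory and filename below are made up for the
# example); every mutating call persists a gzip-compressed TOML snapshot.
if __name__ == '__main__':
    store = KVStore('/tmp/lkv-data', 'store.toml.gz')
    store.setk('greeting', 'hello')
    assert store.getk('greeting') == 'hello'
    assert store.countk() == 1
    store.delk('greeting')
    store.cleark()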
```
#### File: littlekv/tests/helpers.py
```python
from typing import Tuple
import subprocess
import time
import os
import signal
import sys
def launch_client_proc(cmd: str, conn_delay: float = 0.25) -> Tuple[int, int, str]:
time.sleep(conn_delay)
client_proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
exit_code = client_proc.wait()
output = client_proc.communicate()[0].decode('utf-8').strip()
return (client_proc.pid, exit_code, output)
def launch_server_proc(cmd: str) -> int:
server_proc = subprocess.Popen(cmd.split())
return server_proc.pid
def kill_server_proc(pid: int, timeout: float = 0.25):
    if sys.platform.startswith('win'):
os.kill(pid, signal.CTRL_C_EVENT)
else:
os.kill(pid, signal.SIGINT)
time.sleep(timeout)
``` |
{
"source": "J-Obog/market-simulator",
"score": 3
} |
#### File: api/auth/controller.py
```python
from flask import request, jsonify
from app import db, bcrypt, cache, jwt
from api.accounts.model import Account, AccountSchema
from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, current_user
from datetime import timedelta
@jwt.token_in_blocklist_loader
def check_token_in_blacklist(_, jwt_payload):
return cache.get(jwt_payload['sub']) is not None
@jwt.user_lookup_loader
def user_lookup(_, jwt_payload):
return Account.query.filter_by(id=jwt_payload['sub']).one_or_none()
""" Log a user out"""
@jwt_required()
def logout():
cache.set(current_user.id, '', ex=3600)
return jsonify(message='Logout successful')
""" Log a user in """
def login():
# request body vars
email = request.json.get('email')
    password = request.json.get('password')
# query for account with matching email
acc = Account.query.filter_by(email=email).first()
# validate if there's a match and the match shares the same password
if acc:
if bcrypt.check_password_hash(acc.password, password):
access_token = create_access_token(identity=acc.id, expires_delta=timedelta(hours=1))
refresh_token = create_refresh_token(identity=acc.id, expires_delta=timedelta(days=30))
return jsonify(access_token=access_token, refresh_token=refresh_token)
else:
return jsonify(message='Email and password must match'), 401
else:
return jsonify(message='No matching account for email'), 401
""" Sign user up """
def register_user():
# request body vars
email = request.json.get('email')
password = request.json.get('password')
# handling validation errors
try:
AccountSchema().load(request.json)
except Exception as e:
return jsonify(message=e.messages), 401
if Account.query.filter_by(email=email).first():
return jsonify(message={'email': ['Account with email already exists']}), 401
# loading user into db
acc = Account(email=email, password=bcrypt.generate_password_hash(password, 10).decode('utf-8'))
db.session.add(acc)
db.session.commit()
return jsonify(message='Registration successful')
```
#### File: migrations/versions/cef206444493_finished_stockdata_model.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cef206444493'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('stock_data',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('stock_id', sa.Integer(), nullable=False),
sa.Column('prev_close', sa.Float(decimal_return_scale=2), nullable=False),
sa.Column('market_price', sa.Float(decimal_return_scale=2), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['stock_id'], ['stock.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('stock_data')
# ### end Alembic commands ###
``` |
{
"source": "J-Obog/social-network-model",
"score": 4
} |
#### File: J-Obog/social-network-model/network.py
```python
class Network:
def __init__(self, capacity = -1):
self.__capacity = capacity
self.__size = 0
self.__map = {}
self.__graph = []
self.__sparse_queue = []
""" add new user to the network """
def add(self, user):
if (len(self.__sparse_queue) == 0) and (self.__size == self.__capacity):
raise Exception('Network has reached capacity')
if self.__map.get(user) != None:
raise Exception('Username already exists in network')
if len(self.__sparse_queue) == 0:
self.__map[user] = self.__size
for i in range(self.__size):
self.__graph[i].append(0)
self.__graph.append([0] * (self.__size + 1))
else:
self.__map[user] = self.__sparse_queue[0]
self.__sparse_queue.pop(0)
self.__size += 1
""" remove user from the network """
def remove(self, user):
if self.__map.get(user) == None:
raise Exception("User doesn't exist in network")
        for i in range(self.__size):
            #clear both the column and the row so a reused index starts with no friendships
            self.__graph[i][self.__map[user]] = 0
            self.__graph[self.__map[user]][i] = 0
self.__sparse_queue.append(self.__map[user])
self.__map.pop(user)
""" change user's name """
def update(self, user, new_user):
if self.__map.get(user) == None:
raise Exception("User doesn't exist in network")
if self.__map.get(new_user) != None:
raise Exception('Username already exists in network')
self.__map[new_user] = self.__map[user]
self.__map.pop(user)
""" make user1 and user2 friends """
def link(self, user1, user2):
if self.__map.get(user1) == None or self.__map.get(user2) == None:
raise Exception("Both users must exist in network")
self.__graph[self.__map[user1]][self.__map[user2]] = 1
self.__graph[self.__map[user2]][self.__map[user1]] = 1
""" get list of user's friends """
def friend_list(self, user):
if self.__map.get(user) == None:
raise Exception("User doesn't exist in network")
lst = []
for i in range(self.__size):
if self.__graph[self.__map[user]][i] == 1:
for pair in self.__map.items():
k, v = pair
if v == i:
lst.append(k)
return lst
""" get list of user1 and user2's mutual friends """
def mutual_friends(self, user1, user2):
if self.__map.get(user1) == None or self.__map.get(user2) == None:
raise Exception("Both users must exist in network")
return list(set(self.friend_list(user1)) & set(self.friend_list(user2)))
""" check if user1 and user 2 are friends """
def are_friends(self, user1, user2):
if self.__map.get(user1) == None or self.__map.get(user2) == None:
raise Exception("Both users must exist in network")
return (self.__graph[self.__map[user1]][self.__map[user2]] == 1)
""" check if user1 and user2 are mutual friends """
def are_mutual_friends(self, user1, user2):
if self.__map.get(user1) == None or self.__map.get(user2) == None:
raise Exception("Both users must exist in network")
for i in range(self.__size):
if i != self.__map[user1] and i != self.__map[user2]:
if self.__graph[i][self.__map[user1]] == 1 and self.__graph[i][self.__map[user2]] == 1:
return True
return False
```
#### File: J-Obog/social-network-model/run.py
```python
from network import Network
def main():
net = Network()
# adding users to network
net.add('JDoe63')
net.add('SJohnson78')
net.add('DFord99')
# 'friending' two users
net.link('JDoe63', 'SJohnson78')
net.link('DFord99', 'SJohnson78')
# testing out Network API
print(f"\nDFord99 and JDoe63 are mutual friends: {net.are_mutual_friends('DFord99', 'JDoe63')}")
print(f"\nJDoe63's friend list: {net.friend_list('JDoe63')}")
print(f"\nSJohnson78's friend list: {net.friend_list('SJohnson78')}")
if __name__ == '__main__':
main()
``` |
{
"source": "jobomix/ariadne-relay",
"score": 2
} |
#### File: ariadne-relay/ariadne_relay/node.py
```python
import asyncio
from typing import Any, Awaitable, Callable, cast, Optional, Tuple, Union
from graphql import default_field_resolver, GraphQLObjectType, GraphQLResolveInfo
from graphql_relay import from_global_id, to_global_id
from .objects import RelayObjectType
from .utils import is_coroutine_callable
NodeIdAwaitable = Callable[..., Awaitable[str]]
NodeIdCallable = Callable[..., str]
NodeIdResolver = Union[NodeIdAwaitable, NodeIdCallable]
NodeInstanceAwaitable = Callable[..., Awaitable[Any]]
NodeInstanceCallable = Callable[..., Any]
NodeInstanceResolver = Union[NodeInstanceAwaitable, NodeInstanceCallable]
INSTANCE_RESOLVER = "ariadne_relay_node_instance_resolver"
class NodeObjectType(RelayObjectType):
_resolve_id: NodeIdResolver
_resolve_instance: Optional[NodeInstanceResolver]
def __init__(
self,
name: str,
*,
id_resolver: NodeIdResolver = default_field_resolver,
instance_resolver: Optional[NodeInstanceResolver] = None,
) -> None:
super().__init__(name)
self._resolve_id = id_resolver
self._resolve_instance = instance_resolver
def bind_resolvers_to_graphql_type(
self, graphql_type: GraphQLObjectType, replace_existing: bool = True
) -> None:
super().bind_resolvers_to_graphql_type(graphql_type, replace_existing)
if "id" not in graphql_type.fields:
raise ValueError(f"Field id is not defined on type {self.name}")
if graphql_type.fields["id"].resolve is None or replace_existing:
if is_coroutine_callable(self._resolve_id):
graphql_type.fields["id"].resolve = self._resolve_node_id_field
else:
graphql_type.fields["id"].resolve = self._resolve_node_id_field_sync
if self._resolve_instance is not None:
graphql_type.extensions = graphql_type.extensions or {}
graphql_type.extensions[INSTANCE_RESOLVER] = self._resolve_instance
async def _resolve_node_id_field(self, obj: Any, info: GraphQLResolveInfo) -> str:
resolve_id = cast(NodeIdAwaitable, self._resolve_id)
return to_global_id(self.name, await resolve_id(obj, info))
def _resolve_node_id_field_sync(self, obj: Any, info: GraphQLResolveInfo) -> str:
resolve_id = cast(NodeIdCallable, self._resolve_id)
return to_global_id(self.name, resolve_id(obj, info))
def set_id_resolver(self, id_resolver: NodeIdResolver) -> NodeIdResolver:
self._resolve_id = id_resolver
return id_resolver
def set_instance_resolver(
self, instance_resolver: NodeInstanceResolver
) -> NodeInstanceResolver:
self._resolve_instance = instance_resolver
return instance_resolver
# Alias resolvers for consistent decorator API
id_resolver = set_id_resolver
instance_resolver = set_instance_resolver
async def resolve_node_query(
_: None,
info: GraphQLResolveInfo,
*,
id: str, # noqa: A002
) -> Any:
instance_resolver_and_node_id = _get_instance_resolver_and_node_id(info, id)
if instance_resolver_and_node_id:
instance_resolver, node_id = instance_resolver_and_node_id
node_instance = instance_resolver(node_id, info)
if asyncio.iscoroutine(node_instance):
node_instance = await node_instance
return node_instance
return None
def resolve_node_query_sync(
_: None,
info: GraphQLResolveInfo,
*,
id: str, # noqa: A002
) -> Any:
instance_resolver_and_node_id = _get_instance_resolver_and_node_id(info, id)
if instance_resolver_and_node_id:
instance_resolver, node_id = instance_resolver_and_node_id
return instance_resolver(node_id, info)
return None
def _get_instance_resolver_and_node_id(
info: GraphQLResolveInfo,
raw_id: str,
) -> Optional[Tuple[NodeInstanceResolver, str]]:
try:
node_type_name, node_id = from_global_id(raw_id)
except Exception as e:
raise ValueError(f'Invalid ID "{raw_id}"') from e
node_type = info.schema.type_map.get(node_type_name)
if node_type is None:
return None
instance_resolver = getattr(node_type, "extensions", {}).get(INSTANCE_RESOLVER)
return instance_resolver, node_id
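# Minimal usage sketch (the "Faction" type name and the get_faction() helper are
# hypothetical, not part of this module): register per-type resolvers on a
# NodeObjectType, then expose resolve_node_query / resolve_node_query_sync as the
# resolver for the Relay `node` field on the Query type.
#     faction = NodeObjectType("Faction")
#     @faction.instance_resolver
#     def resolve_faction_instance(node_id, info):
#         return get_faction(node_id)
#     @faction.id_resolver
#     def resolve_faction_id(obj, info):
#         return obj.pk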
``` |
{
"source": "jobor019/pyosc",
"score": 2
} |
#### File: maxosc/maxosc/oscloghandler.py
```python
import logging
from maxosc.sender import Sender
class OscLogHandler(logging.Handler):
def __init__(self, sender: Sender, log_level: int = logging.INFO, log_format: str = '%(levelname)s %(message)s'):
super().__init__()
self.sender: Sender = sender
self.setLevel(log_level)
self.setFormatter(logging.Formatter(log_format))
def emit(self, record: logging.LogRecord):
self.sender.send_warning(self.format(record))
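# Minimal usage sketch (assumes an already-constructed maxosc Sender instance
# named `osc_sender`; how Sender is built is not shown in this file):
#     logger = logging.getLogger("maxosc")
#     logger.addHandler(OscLogHandler(osc_sender, log_level=logging.WARNING))
#     logger.warning("forwarded to Max over OSC")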
```
#### File: maxosc/test/max_test_default.py
```python
from maxosc.maxosc import MaxOsc
class MaxTestDefault(MaxOsc):
def __init__(self):
super(MaxTestDefault, self).__init__()
self.run()
def no_args(self):
print("no args")
return "bang"
def no_return(self):
print("no return")
def one_mand(self, mand):
print("one mand", mand)
return mand
def one_opt(self, opt=None):
print("one opt", opt)
return opt
def one_each(self, mand, opt=None):
print("one each", mand, opt)
return mand, opt
def two_each(self, mand1, mand2, opt1=None, opt2=None):
print("two each", mand1, mand2, opt1, opt2)
return mand1, mand2, opt1, opt2
@staticmethod
def stat_noarg():
return "bang"
@staticmethod
def stat_arg(a1):
return a1
if __name__ == '__main__':
MaxTestDefault()
``` |
{
"source": "jobovy/apogee-maps",
"score": 2
} |
#### File: apogee-maps/py/calc_effsel_monoage.py
```python
import numpy as np
import define_rgbsample
import calc_effsel as ce
from optparse import OptionParser
class EffselOptions(object):
def __init__(self, dm_min = 7.,
dm_max = 15.5,
ndm = 301,
dmap = 'marshall06',
samplemh = False,
m = 32,
dr = '12',
samp = 'rc',
modelmh = False,
distgrid = False):
self.dm_min = dm_min
self.dm_max = dm_max
self.ndm = ndm
self.dmap = dmap
self.samplemh = samplemh
self.multi = m
self.dr = dr
self.samp = samp
self.modelmh = modelmh
self.distgrid = distgrid
opt = EffselOptions(dmap = 'green15', samplemh = True, samp='rgb', modelmh=True, distgrid=False)
def effsel_bins(opt):
agebins = np.arange(1.,14.,2.)
fehbins = np.arange(-0.6,0.3,0.1)
for i in range(0, len(fehbins)-1):
for j in range(0,len(agebins)-1):
print 'Calculating Effective Selection function for [Fe/H] = '+str(round(fehbins[i],1))+' and age = '+str(round(agebins[j],1))+''
if opt.distgrid == False:
filename = '../essf/maps/essf_'+opt.samp+'_'+opt.dmap+'_modelmh_feh'+str(round(fehbins[i],1))+'_age'+str(round(agebins[j],1))+'.sav'
if opt.distgrid == True:
filename = '../essf/maps/essf_'+opt.samp+'_'+opt.dmap+'_distgrid_modelmh_feh'+str(round(fehbins[i],1))+'.sav'
ce.calc_effsel([filename,], opt, fehbin=[fehbins[i],fehbins[i+1]], agebin=[agebins[j], agebins[j+1]])
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the effective selection function will be saved to"
parser = OptionParser(usage=usage)
# Distances at which to calculate the effective selection function
parser.add_option("--dm_min",dest='dm_min',default=7.,type='float',
help="Minimum distance modulus")
parser.add_option("--dm_max",dest='dm_max',default=15.5,type='float',
help="Maximum distance modulus")
parser.add_option("--ndm",dest='ndm',default=301,type='int',
help="Number of distance moduli to calculate the function at")
parser.add_option("--distancegrid",action="store_true", dest="distgrid",
default=False,
help="if set, use distance grid rather than distmod")
# Dust map to use
parser.add_option("--dmap",dest='dmap',default='green15',
help="Dust map to use ('Green15', 'Marshall03', 'Drimmel03', 'Sale14', or 'zero'")
# Sample over the M_H of the sample?
parser.add_option("--samplemh",action="store_true", dest="samplemh",
default=True,
help="If set, sample the M_H distribution of the sub-sample (default= full sample)")
# Multiprocessing?
parser.add_option("-m","--multi",dest='multi',default=1,type='int',
help="number of cpus to use")
# Alternate Data releases?
parser.add_option("--dr",dest='dr',default=None,
help="Data release to use")
# RGB or RC?
parser.add_option("--samp", dest='samp', default='rgb', help = "rc or rgb")
# model M_H distribution?
parser.add_option("--modelmh", dest='modelmh', default=True, help="If True, sample a model M_H distribution from parsec isochrones.")
return parser
if __name__ == '__main__':
parser= get_options()
options, args = parser.parse_args()
effsel_bins(options)
```
#### File: apogee-maps/py/calc_effsel.py
```python
import os, os.path
import pickle
import multiprocessing
import numpy
from optparse import OptionParser
from galpy.util import save_pickles, multi
import apogee.select.apogeeSelect
import mwdust
import isodist
from define_rcsample import get_rcsample
from define_rgbsample import get_rgbsample
from scipy.interpolate import interp1d
def calc_effsel(args,options,sample=None, fehbin=[0.,0.1], agebin=[0.,13.]):
# Work-horse function to compute the effective selection function,
# sample is a data sample of stars to consider for the (JK,Z) sampling
# Setup selection function
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
else:
# Setup selection function
apo= apogee.select.apogeeSelect()
# Delete these because they're big and we don't need them
del apo._specdata
del apo._photdata
save_pickles(selectFile,apo)
# Get the full data sample for the locations (need all locations where
# stars could be observed, so the whole sample, not just the subsample
# being analyzed)
if options.samp == 'rc':
data= get_rcsample(dr = options.dr)
if options.samp == 'rgb':
data= get_rgbsample(add_ages = True)
locations= list(set(list(data['LOCATION_ID'])))
# Load the dust map and setup the effective selection function
if options.dmap.lower() == 'green15':
dmap3d= mwdust.Green15(filter='2MASS H')
elif options.dmap.lower() == 'marshall06':
dmap3d= mwdust.Marshall06(filter='2MASS H')
elif options.dmap.lower() == 'drimmel03':
dmap3d= mwdust.Drimmel03(filter='2MASS H')
elif options.dmap.lower() == 'sale14':
dmap3d= mwdust.Sale14(filter='2MASS H')
elif options.dmap.lower() == 'zero':
dmap3d= mwdust.Zero(filter='2MASS H')
# Sample the M_H distribution
if options.samplemh:
if sample is None: sample= data
if options.samp == 'rc':
MH= sample['H0']-sample['RC_DM']
MH= numpy.random.permutation(MH)[:1000] # do 1,000 max
if options.samp == 'rgb':
if options.modelmh == True:
MH = sample_iso_MH(fehbin = fehbin, n=1000)
if options.modelmh != True:
MH = numpy.random.permutation(sample['M_H'])[:1000]
else:
MH= -1.49
apof= apogee.select.apogeeEffectiveSelect(apo,dmap3d=dmap3d,MH=MH)
# Distances at which to calculate the effective selection function
if options.distgrid == False:
distmods= numpy.linspace(options.dm_min,options.dm_max,options.ndm)
ds= 10.**(distmods/5-2.)
if options.distgrid == True:
ds = numpy.linspace(0.01,20.,300)
        # invert ds = 10**(distmods/5 - 2), i.e. use log10, not the natural log
        distmods= 5.*numpy.log10(ds)+10.
# Now compute all selection functions
out= multi.parallel_map((lambda x: _calc_effsel_onelocation(\
locations[x],apof,apo,ds)),
range(len(locations)),
numcores=numpy.amin([len(locations),
multiprocessing.cpu_count(),options.multi]))
# Save out
out= numpy.array(out)
save_pickles(args[0],locations,out,distmods,ds)
return None
def _calc_effsel_onelocation(locid,apof,apo,ds):
# Calculate the effective selection function for a given location
try:
esf= apof(locid,ds)*apo.area(locid)
except (IndexError, TypeError,ValueError):
esf= -numpy.ones_like(ds)
return esf
def sample_iso_MH(fehbin = [-0.6, 0.2], agebin=[0.,13.], n=1000, apply_agebias=False, imftype='chabrier2003', isochrones='Padova'):
#Load pre-computed parsec isochrone grid
iso_file = open('../savs/'+isochrones+'_grid_'+imftype+'.sav')
iso_grid = pickle.load(iso_file)
    #add rgb age bias if apply_agebias = True (flag renamed so it no longer shadows the agebias() function below)
    if apply_agebias == True:
        iso_grid[:,6] *= agebias(10**iso_grid[:,0])
#Perform sample cuts (logg and J-K)
gridcuts = (iso_grid[:,3] > 1.8)&(iso_grid[:,3] < 3.0)&(iso_grid[:,5] > 0.5)
cutgrid = iso_grid[gridcuts]
n_weights = cutgrid[:,6]*(10**cutgrid[:,0]/cutgrid[:,1])
#make [Fe/H], age cut
fehcut = (isodist.Z2FEH(cutgrid[:,1])>=fehbin[0])&(isodist.Z2FEH(cutgrid[:,1])<fehbin[1])\
&(10**cutgrid[:,0] >= agebin[0])&(10**cutgrid[:,0] < agebin[1])
# compute CDF of M_H
sorter_H = numpy.argsort(cutgrid[:,4][fehcut])
cdf_H = numpy.cumsum(n_weights[fehcut][sorter_H])/numpy.sum(n_weights[fehcut])
#Interpolate CDF and take n samples
intercdf_H = interp1d(cdf_H, cutgrid[:,4][fehcut][sorter_H])
rand = numpy.random.uniform(size=n, low=0.0001, high=0.999999)
model_MH = intercdf_H(rand)
return model_MH
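# Illustrative standalone sketch of the inverse-CDF sampling used above (toy
# numbers, not part of the pipeline): build the normalised cumulative sum of the
# weights, interpolate value-versus-CDF, and evaluate at uniform random draws.
#     w = numpy.array([1., 2., 1.])
#     x = numpy.array([-1., 0., 1.])
#     cdf = numpy.cumsum(w)/numpy.sum(w)
#     invcdf = interp1d(cdf, x, bounds_error=False, fill_value=(x[0], x[-1]))
#     draws = invcdf(numpy.random.uniform(size=5))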
def agebias(age):
return (age+1)**-0.7
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the effective selection function will be saved to"
parser = OptionParser(usage=usage)
# Distances at which to calculate the effective selection function
parser.add_option("--dm_min",dest='dm_min',default=7.,type='float',
help="Minimum distance modulus")
parser.add_option("--dm_max",dest='dm_max',default=15.5,type='float',
help="Maximum distance modulus")
parser.add_option("--ndm",dest='ndm',default=301,type='int',
help="Number of distance moduli to calculate the function at")
parser.add_option("--distancegrid",action="store_true", dest="distgrid",
default=False,
help="if set, use distance grid rather than distmod")
# Dust map to use
parser.add_option("--dmap",dest='dmap',default='green15',
help="Dust map to use ('Green15', 'Marshall03', 'Drimmel03', 'Sale14', or 'zero'")
# Sample over the M_H of the sample?
parser.add_option("--samplemh",action="store_true", dest="samplemh",
default=False,
help="If set, sample the M_H distribution of the sub-sample (default= full sample)")
# Multiprocessing?
parser.add_option("-m","--multi",dest='multi',default=1,type='int',
help="number of cpus to use")
# Alternate Data releases?
parser.add_option("--dr",dest='dr',default=None,
help="Data release to use")
# RGB or RC?
parser.add_option("--samp", dest='samp', default='rc', help = "rc or rgb")
# model M_H distribution?
parser.add_option("--modelmh", dest='modelmh', default=False, help="If True, sample a model M_H distribution from parsec isochrones.")
return parser
if __name__ == '__main__':
parser= get_options()
options, args= parser.parse_args()
calc_effsel(args,options)
```
#### File: apogee-maps/py/calc_masses.py
```python
import numpy
import numpy as np
import densprofiles
import define_rgbsample
import pickle
from isodist import Z2FEH
from galpy.util import bovy_coords
from fitDens import _setup_densfunc
import os
from scipy.integrate import quad
from scipy import interpolate
import multiprocessing
from galpy.util import multi
def load_isochrones(gridfile):
iso_file = open(gridfile)
iso_grid = pickle.load(iso_file)
return iso_grid
def calc_normalisation(params, nbin, iso_grid,
fehbin=[-0.1,0.0],
agebin=[1.,3.],
loggcut=[1.8,3.0],
teffcut=[4000,5000],
type='brokenexpflare',
verbose=True,
fitIndx=None,
weights = 'padova',
distance_cut = False,
lowermass = None):
#first get the values necessary from the isochrone grid
#make a mask for giant stars (+ J-K cut)
if teffcut == None:
giants = (iso_grid[:,3] >= loggcut[0])&(iso_grid[:,3] < loggcut[1])&(iso_grid[:,5] > 0.5)
else:
giants = (iso_grid[:,3] >= loggcut[0])&(iso_grid[:,3] < loggcut[1])&(iso_grid[:,5] > 0.5)&(10**iso_grid[:,7] >= teffcut[0])&(10**iso_grid[:,7] < teffcut[1])
#make a mask for the age and feh bin
if agebin == None:
bin = (10**iso_grid[:,0] >= 0.)&(10**iso_grid[:,0] < 13.)&\
(Z2FEH(iso_grid[:,1]) >= fehbin[0])&(Z2FEH(iso_grid[:,1]) < fehbin[1])
else:
bin = (10**iso_grid[:,0] >= agebin[0])&(10**iso_grid[:,0] < agebin[1])&\
(Z2FEH(iso_grid[:,1]) >= fehbin[0])&(Z2FEH(iso_grid[:,1]) < fehbin[1])
if lowermass != None:
giants *= iso_grid[:,2] >= lowermass
bin *= iso_grid[:,2] >= lowermass
if len(iso_grid[:,0][bin]) < 1:
fehs = np.unique(Z2FEH(iso_grid[:,1]))
cfehbin = fehbin[0]+((fehbin[1]-fehbin[0])/2)
feh_offsets = np.fabs(fehs-cfehbin)
ind = np.argmin(feh_offsets)
cfeh = fehs[ind]
bin = (10**iso_grid[:,0] >= agebin[0])&(10**iso_grid[:,0] < agebin[1])&\
(Z2FEH(iso_grid[:,1]) == cfeh)
#find the average giant mass
mass = iso_grid[:,2]
if weights == 'padova':
weight = iso_grid[:,6]*(10**iso_grid[:,0]/iso_grid[:,1])
if weights == 'basti':
weight = iso_grid[:,6]
av_mass = np.sum(mass[giants&bin]*weight[giants&bin])/np.sum(weight[giants&bin])
#find the ratio between giants and the total stellar pop. for this bin
mass_total = mass[bin]
weight_total = weight[bin]
mass_bin = mass[giants&bin]
weight_bin = weight[giants&bin]
m_ratio = np.sum(mass_bin*weight_bin)/np.sum(mass_total*weight_total)
#now compute and sum the rate for this density function
#load the raw selection function
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
#load the effective selection function
if agebin == None:
with open('../essf/maps/essf_rgb_green15_modelmh_feh'+str(round(fehbin[0],1))+'.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel= pickle.load(savefile)
distmods= pickle.load(savefile)
with open('../essf/maps/essf_rgb_marshall06_modelmh_feh'+str(round(fehbin[0],1))+'.sav','rb') as savefile:
mlocations= pickle.load(savefile)
meffsel= pickle.load(savefile)
mdistmods= pickle.load(savefile)
if agebin != None:
if agebin[0] < 1.:
with open('../essf/maps/essf_rgb_green15_modelmh_feh'+str(round(fehbin[0],1))+'_age'+str(round(1.0,1))+'.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel= pickle.load(savefile)
distmods= pickle.load(savefile)
with open('../essf/maps/essf_rgb_marshall06_modelmh_feh'+str(round(fehbin[0],1))+'_age'+str(round(1.0,1))+'.sav','rb') as savefile:
mlocations= pickle.load(savefile)
meffsel= pickle.load(savefile)
mdistmods= pickle.load(savefile)
if agebin[0] > 0.9:
with open('../essf/maps/essf_rgb_green15_modelmh_feh'+str(round(fehbin[0],1))+'_age'+str(round(agebin[0],1))+'.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel= pickle.load(savefile)
distmods= pickle.load(savefile)
with open('../essf/maps/essf_rgb_marshall06_modelmh_feh'+str(round(fehbin[0],1))+'_age'+str(round(agebin[0],1))+'.sav','rb') as savefile:
mlocations= pickle.load(savefile)
meffsel= pickle.load(savefile)
mdistmods= pickle.load(savefile)
# Fill in regions not covered by Marshall map
meffsel[meffsel < -0.5]= effsel[meffsel < -0.5]
if fitIndx is None:
fitIndx= numpy.ones(len(mlocations),dtype='bool') #True-betwDiskIndx
locations, effsel, distmods = np.array(mlocations)[fitIndx], np.array(meffsel)[fitIndx], mdistmods
#get the density function and set it up to find the normalisation (surfdens=True)
rdensfunc= _setup_densfunc(type)
densfunc= lambda x: rdensfunc(x,None,None,params=params, surfdens=True)
#evaluate surface density at R0 for the density normalisation (always 1. if R_b > R0)
R0 = densprofiles._R0
Rb = np.exp(params[3])
dens_norm = densfunc(densprofiles._R0)
#set up the density function again with surfdens=False for the rate calculation
rdensfunc= _setup_densfunc(type)
densfunc= lambda x,y,z: rdensfunc(x,y,z,params=params, surfdens=False)
ds= 10.**(distmods/5.-2.)
#imply the distance cut if distance_cut == True
if distance_cut == True:
distmods = distmods[ds <= 3.]
ds= ds[ds <= 3.]
effsel = effsel[:,:len(ds)]
#Compute the grid of R, phi and Z for each location
Rgrid, phigrid, zgrid= [], [], []
for loc in locations:
lcen, bcen= apo.glonGlat(loc)
XYZ= bovy_coords.lbd_to_XYZ(lcen*numpy.ones_like(ds),
bcen*numpy.ones_like(ds),
ds,
degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rgbsample._R0,
Zsun=define_rgbsample._Z0)
Rgrid.append(Rphiz[:,0])
phigrid.append(Rphiz[:,1])
zgrid.append(Rphiz[:,2])
Rgrid= numpy.array(Rgrid)
phigrid= numpy.array(phigrid)
zgrid= numpy.array(zgrid)
# Now compute rate(R) for each location and combine
effsel*= numpy.tile(ds**2.*(distmods[1]-distmods[0])*(ds*np.log(10)/5.),(effsel.shape[0],1))
tdens= densfunc(Rgrid,phigrid,zgrid)/dens_norm
rate= tdens*effsel
sumrate = np.sum(rate)
#calculate normalisation N(R0)
norm = (nbin/sumrate)
#convert units (Kpc^2 > pc^2, deg > rad etc)
norm *= 1e-6*(180/np.pi)**2
#compute mass in bin using values from isochrones
bin_mass = (norm*av_mass)/m_ratio
if verbose==True:
print bin_mass
return bin_mass, norm, m_ratio, (av_mass*1e-6*(180/np.pi)**2)/(sumrate*m_ratio)
def calculate_bin_error(samples, fehbin, agebin, nbin, iso_grid,
type='brokenexpflare',
loggcut=[1.8,3.0],
teffcut=[4000,5000],
n_sampling=1000,
progress=True,
mp=True,
fitIndx=None,
weights = 'padova',
distance_cut = False,
lowermass = None):
randsamples = np.random.permutation(samples.T)[:n_sampling]
m_sample = np.zeros(np.shape(randsamples)[0])
    if mp == False:  # the mp flag, not the galpy.util.multi module imported above
for ii,params in enumerate(randsamples):
if progress==True:
print ''+str(round(float(ii)/float(n_sampling)*100,2))+'% complete!'
            m = calc_normalisation(params, nbin, iso_grid, fehbin=fehbin, agebin=agebin, loggcut=loggcut, teffcut=teffcut, type=type, verbose=False, fitIndx=fitIndx, weights=weights, distance_cut=distance_cut, lowermass=lowermass)[0]
m_sample[ii] = m
if mp == True:
m_sample= multi.parallel_map((lambda x: calc_normalisation(randsamples[x], nbin, iso_grid, fehbin=fehbin, agebin=agebin,loggcut=loggcut, teffcut=teffcut, type=type, verbose=False, fitIndx=fitIndx, distance_cut=distance_cut, lowermass=lowermass)[0]),\
range(np.shape(randsamples)[0]),numcores=numpy.amin([np.shape(randsamples)[0], multiprocessing.cpu_count()/2]))
median = np.percentile(m_sample, 50)
lowerr = np.percentile(m_sample, 16)
uperr = np.percentile(m_sample, 84)
return m_sample, median, lowerr, uperr
```
#### File: apogee-maps/py/calc_mass.py
```python
import numpy
import fitDens
import densprofiles
def calcDiskMass(data,samples,
locations,effsel,distmods,
type='tribrokenexpflare'):
"""
NAME:
calcDiskMass
PURPOSE:
calculate the local surface density for a set of density profiles
INPUT:
data - the data array
samples - an array [nparam,ndens] of density-profile parameters
locations - locations of the APOGEE effective selection function
effsel - array (nloc,nD) of the effective selection function, includes area of the field
distmods - grid of distance moduli on which the effective selection function is pre-computed
type= ('exp') type of density profile to use
OUTPUT:
local surface density in Msol/pc^2
HISTORY:
2015-04-29 - Written - Bovy (IAS)
"""
# Setup the density function and its initial parameters
densfunc= fitDens._setup_densfunc(type)
# Setup the integration of the effective volume
effsel, Rgrid, phigrid, zgrid= \
fitDens._setup_effvol(locations,effsel,distmods)
out= []
for sample in samples.T:
# Setup the density function, fix the normalization for Rb < R0
if 'tribroken' in type and numpy.exp(sample[3]) < densprofiles._R0:
norm= numpy.exp(-(sample[0]+sample[2])\
*(numpy.exp(sample[3])-densprofiles._R0))
else:
norm= 1.
tdensfunc= lambda x,y,z: densfunc(x,y,z,params=sample)*norm
out.append(calcDiskMass_single(data,tdensfunc,
effsel,Rgrid,phigrid,zgrid))
return numpy.array(out)*12500.
def calcDiskMass_single(data,
densfunc,effsel,Rgrid,phigrid,zgrid):
"""
NAME:
calcDiskMass_single
PURPOSE:
calculate the local surface density for a single density profile
INPUT:
data - the data array
densfunc - function that returns the density when called with R,phi,z
effsel - array (nloc,nD), includes D^2*Delta D factor
Rgrid, phigrid, zgrid - array (nloc,nD) of the cylindrical Galactocentric coordinates corresponding to the (l_loc,b_loc,D) of the effective selection function
OUTPUT:
local surface density in Msol/pc^2
HISTORY:
2015-04-29 - Written - Bovy (IAS)
"""
# Calculate the effective volume
pred= fitDens.effvol(densfunc,effsel,Rgrid,phigrid,zgrid)
# Area included in effvol is in deg^2
return len(data)/pred/10.**6.*(180./numpy.pi)**2.
```
#### File: apogee-maps/py/define_rgbsample.py
```python
import math
import numpy
import statsmodels.api as sm
lowess= sm.nonparametric.lowess
import esutil
from galpy.util import bovy_coords, bovy_plot
from scipy.interpolate import interp1d,UnivariateSpline
import apogee.tools.read as apread
import isodist
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import apogee.tools.read as apread
from apogee.select import apogeeSelect
from astropy.io import fits
from astropy.table import Table, join
_R0= 8. # kpc
_Z0= 0.025 # kpc
_FEHTAG= 'FE_H'
_AFETAG= 'AVG_ALPHAFE'
_AFELABEL= r'$[\left([\mathrm{O+Mg+Si+S+Ca}]/5\right)/\mathrm{Fe}]$'
catpath = '../catalogues/'
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
def get_rgbsample(loggcut = [1.8, 3.0],
teffcut = [0, 10000],
add_ages = False,
agetype='Martig',
apply_corrections=False,
distance_correction=False,
verbose = False):
"""
Get a clean sample of dr12 APOGEE data with Michael Haydens distances
---
INPUT:
None
OUTPUT:
Clean rgb sample with added distances
HISTORY:
Started - Mackereth 02/06/16
"""
#get the allStar catalogue using apogee python (exlude all bad flags etc)
allStar = apread.allStar(rmcommissioning=True,
exclude_star_bad=True,
exclude_star_warn=True,
main=True,
ak=True,
adddist=False)
#cut to a 'sensible' logg range (giants which are not too high on the RGB)
allStar = allStar[(allStar['LOGG'] > loggcut[0])&(allStar['LOGG'] < loggcut[1])&
(allStar['TEFF'] > teffcut[0])&(allStar['TEFF'] < teffcut[1])]
if verbose == True:
print str(len(allStar))+' Stars before Distance catalogue join (after Log(g) cut)'
#load the distance VAC
dists = fits.open(catpath+'DR12_DIST_R-GC.fits')[1].data
#convert to astropy Table
allStar_tab = Table(data=allStar)
dists_tab = Table(data=dists)
#join table
tab = join(allStar_tab, dists_tab, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
data = tab.as_array()
data= esutil.numpy_util.add_fields(data,[('M_J', float),
('M_H', float),
('M_K', float),
('MH50_DIST', float),
('MH50_GALR', float),
('MH50_GALZ', float),
('MH50_GALPHI', float),
('AVG_ALPHAFE', float)])
data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50']+5)/5))/1e3
if distance_correction == True:
data['MH50_DIST'] *= 1.05
XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
data['GLAT'],
data['MH50_DIST'],
degree=True)
RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
XYZ[:,1],
XYZ[:,2],
Xsun=8.,Zsun=0.025)
data['MH50_GALR']= RphiZ[:,0]
data['MH50_GALPHI']= RphiZ[:,1]
data['MH50_GALZ']= RphiZ[:,2]
data['M_J'] = data['J0']-data['HAYDEN_DISTMOD_50']
data['M_H'] = data['H0']-data['HAYDEN_DISTMOD_50']
data['M_K'] = data['K0']-data['HAYDEN_DISTMOD_50']
data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
data[_FEHTAG] += -0.1
#remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
# Remove locations outside of the Pan-STARRS dust map
# In the Southern hemisphere
data= data[data['LOCATION_ID'] != 4266] #240,-18
data= data[data['LOCATION_ID'] != 4331] #5.5,-14.2
data= data[data['LOCATION_ID'] != 4381] #5.2,-12.2
data= data[data['LOCATION_ID'] != 4332] #1,-4
data= data[data['LOCATION_ID'] != 4329] #0,-5
data= data[data['LOCATION_ID'] != 4351] #0,-2
data= data[data['LOCATION_ID'] != 4353] #358,0
data= data[data['LOCATION_ID'] != 4385] #358.6,1.4
# Close to the ecliptic pole where there's no data (is it the ecliptic pole?
data= data[data['LOCATION_ID'] != 4528] #120,30
data= data[data['LOCATION_ID'] != 4217] #123,22.4
#remove any non-finite magnitudes
data = data[np.isfinite(data['M_H'])]
if verbose == True:
print str(len(data))+' Stars with distance measures (and in good fields...)'
if add_ages == True:
if agetype == 'Martig':
ages = fits.open(catpath+'DR12_martigages_vizier.fits')[1].data
idtag = '2MASS_ID'
if agetype == 'Cannon':
ages = fits.open(catpath+'RGB_Cannon_Ages.fits')[1].data
ages = esutil.numpy_util.add_fields(ages,[('Age', float)])
ages['Age'] = np.exp(ages['ln_age'])
idtag = 'ID'
ages_tab = Table(data=ages)
ages_tab.rename_column(idtag, 'APOGEE_ID')
tab = join( ages_tab,data, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
allStar_full = tab.as_array()
data = allStar_full
if verbose == True:
print str(len(data))+' Stars with ages'
if apply_corrections == True:
#martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
        martig1 = fits.open(catpath+'martig_table1.fits')[1].data
fit = lowess(np.log10(martig1['Age_out']),np.log10(martig1['Age_in']))
xs = np.linspace(-0.3,1.2,100)
xsinterpolate = interp1d(xs,xs)
fys = fit[:,0]-xsinterpolate(fit[:,1])
interp = UnivariateSpline(fit[:,1], fys)
corr_age = np.log10(data['Age'])+(interp(np.log10(data['Age'])))
corr_age = 10**corr_age
data['Age'] = corr_age
return data
def avg_alphafe_dr12(data):
weight_o= np.ones(len(data))
weight_s= np.ones(len(data))
weight_si= np.ones(len(data))
weight_ca= np.ones(len(data))
weight_mg= np.ones(len(data))
weight_o[data['O_H'] == -9999.0]= 0.
weight_s[data['S_H'] == -9999.0]= 0.
weight_si[data['SI_H'] == -9999.0]= 0.
weight_ca[data['CA_H'] == -9999.0]= 0.
weight_mg[data['MG_H'] == -9999.0]= 0.
return (weight_o*data['O_H']+weight_s*data['S_H']
+weight_si*data['SI_H']+weight_ca*data['CA_H']
+weight_mg*data['MG_H'])/(weight_o+weight_s
+weight_si+weight_ca
+weight_mg)\
-data['FE_H']-0.05
# Define the low-alpha, low-iron sample
def _lowlow_lowfeh(afe):
# The low metallicity edge
return -0.6
def _lowlow_highfeh(afe):
# The high metallicity edge
return -0.25
def _lowlow_lowafe(feh):
# The low alpha edge (-0.15,-0.075) to (-0.5,0)
return (0--0.075)/(-0.5--0.15)*(feh+0.1--0.15)-0.075
def _lowlow_highafe(feh):
# The high alpha edge (-0.15,0.075) to (-0.5,0.15)
return (0.15-0.075)/(-0.5--0.15)*(feh+0.1--0.15)+0.075
def get_lowlowsample():
"""
NAME:
get_lowlowsample
PURPOSE:
get the RGB sample at low alpha, low iron
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _lowlow_lowfeh(0.)
highfeh= _lowlow_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _lowlow_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _lowlow_highafe(data[_FEHTAG]))
return data[indx]
# Define the high-alpha sample
def _highalpha_lowfeh(afe):
# The low metallicity edge
return -0.8
def _highalpha_highfeh(afe):
# The high metallicity edge
return -0.2
def _highalpha_lowafe(feh):
# The low alpha edge (-0.125,0.115) to (-0.6,0.215)
return (0.2-0.1)/(-0.6--0.125)*(feh+0.1--0.125)+0.115
def _highalpha_highafe(feh):
# The high alpha edge (-0.125,0.19) to (-0.6,0.29)
return (0.275-0.175)/(-0.6--0.125)*(feh+0.1--0.125)+0.19
def get_highalphasample():
"""
NAME:
get_highalphasample
PURPOSE:
get the RC sample at high alpha
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-24 - Started - Bovy (IAS)
"""
# Get the full sample first
    data= get_rgbsample()
# Now cut it
lowfeh= _highalpha_lowfeh(0.)
highfeh= _highalpha_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highalpha_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highalpha_highafe(data[_FEHTAG]))
return data[indx]
# Define the solar sample
def _solar_lowfeh(afe):
# The low metallicity edge
return -0.2
def _solar_highfeh(afe):
# The high metallicity edge
return 0.
def _solar_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _solar_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return (0.1-0.05)/(-0.15-0.1)*(feh+0.1-0.1)+0.05
def get_solarsample():
"""
NAME:
get_solarsample
PURPOSE:
get the RC sample at solar abundances
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _solar_lowfeh(0.)
highfeh= _solar_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
return data[indx]
# Define the high metallicity sample
def _highfeh_lowfeh(afe):
# The low metallicity edge
return 0.05
def _highfeh_highfeh(afe):
# The high metallicity edge
return 0.3
def _highfeh_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _highfeh_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return 0.05
def get_highfehsample():
"""
NAME:
get_highfehsample
PURPOSE:
get the RC sample at high [Fe/H]
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _highfeh_lowfeh(0.)
highfeh= _highfeh_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highfeh_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highfeh_highafe(data[_FEHTAG]))
return data[indx]
def alphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (0.12/-0.6)*fehs[fehs < 0]+0.03
edge[fehs >= 0] = 0.03
return edge
def highalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.13/0.6)*fehs[fehs < 0]+0.04
edge[fehs >= 0] = 0.04
return edge
def lowalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.10/0.6)*fehs[fehs < 0]+0.01
edge[fehs >= 0] = 0.01
return edge
def get_fehage(agebin = [0.,1.], fehbin = [0.,0.2], afebin = 'low', dr=None, agetype='Martig', apply_corrections=False, distance_correction=False):
data = get_rgbsample(add_ages=True, agetype=agetype, apply_corrections=apply_corrections, distance_correction=distance_correction)
if afebin == 'low':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] < alphaedge(data[_FEHTAG]))
if afebin == 'high':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG]))
if afebin == 'highclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= highalphaedge(data[_FEHTAG]))
if afebin == 'lowclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= lowalphaedge(data[_FEHTAG]))
if afebin == 'lownew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= alphaedge(data[_FEHTAG])-0.025)
if afebin == 'highnew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG])+0.025)
if afebin == None:
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])
return data[indx]
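# Example call (a minimal sketch): select the clean low-alpha stars with
# 1 <= age/Gyr < 3 and 0.0 <= [Fe/H] < 0.2,
#     sample = get_fehage(agebin=[1., 3.], fehbin=[0., 0.2], afebin='lowclean')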
def highalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.225)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)\
+(data[_FEHTAG] >= 0.125)
return lowess(data[_AFETAG][indx],data[_FEHTAG][indx],frac=0.6)
def lowalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.025)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)
return lowess(data[_AFETAG][True-indx],data[_FEHTAG][True-indx],frac=0.6)
class MAPs:
"""Class that pixelizes the data sample in [Fe/H] and [a/Fe]"""
def __init__(self,data=None,dfeh=0.1,dafe=0.05,fehmin=-0.75,fehmax=0.35,
afemin=-0.075,afemax=0.275):
"""
NAME:
__init__
PURPOSE:
initialize the MAPs
INPUT:
data= (None) the data sample; if None, whole stat. RC sample
dfeh, dafe= pixel size
fehmin, fehmax, afemin, afemax= minimum and maximum FeH and AFe
OUTPUT:
object with pixelized data
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
        if data is None: data= get_rgbsample()
self.data= data
self.dx= dfeh
self.dy= dafe
self.xmin= fehmin
self.xmax= fehmax
self.ymin= afemin
self.ymax= afemax
# edges in X and Y
self.xedges= numpy.arange(self.xmin,self.xmax+0.01,self.dx)
self.yedges= numpy.arange(self.ymin,self.ymax+0.01,self.dy)
# X and Y
self.x= data[_FEHTAG]
self.y= data[_AFETAG]
return None
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
return the part of the sample in a (feh,afe) pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns data recarray in the bin that feh and afe are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return self.data[(self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])]
def map(self):
"""
NAME:
map
PURPOSE:
yield a map
INPUT:
(none)
OUTPUT:
iterates over the MAPs
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
for ii in range(nx):
for jj in range(ny):
yield self(gx[ii],gy[jj])
def callIndx(self,*args,**kwargs):
"""
NAME:
callIndx
PURPOSE:
return index of the part of the sample in an [Fe/H] and [a/Fe] pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns index into data recarray in the bin that [Fe/H] and [a/Fe] are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return (self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])
def xindx(self,x):
"""
NAME:
xindx
PURPOSE:
return the index corresponding to a [Fe/H] value
INPUT:
[Fe/H]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((x-self.xmin)/self.dx))
def yindx(self,y):
"""
NAME:
yindx
PURPOSE:
return the index corresponding to a [a/Fe] value
INPUT:
[a/Fe]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((y-self.ymin)/self.dy))
def plot(self,quant,func=numpy.median,minnstar=20.,submediany=False,
returnz=False,justcalc=False,
**kwargs):
"""
NAME:
plot
PURPOSE:
make a plot of a quantity as a function of X and Y
INPUT:
quant - the quantity (string that returns the quantity, like
'METALS') or a function of the data
func - function of quantity to plot
minnstar= minimum number of stars (20)
           submediany= subtract the median y
justcalc= (False) if True, do not plot
bovy_plot.bovy_dens2d kwargs
OUTPUT:
plot to output device
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#First create 2D
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
z2d= numpy.empty((nx,ny))
if isinstance(quant,numpy.ndarray):
z2d= numpy.reshape(quant,(nx,ny))
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins= 0
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins+= 1
if hasattr(quant, '__call__'):
z2d[ii,jj]= func(quant(tdata))
else:
z2d[ii,jj]= func(tdata[quant])
if submediany:
z2d[ii,:]-= \
numpy.median(z2d[ii,True-numpy.isnan(z2d[ii,:])])
if justcalc:
if returnz:
return z2d
else:
return None
#Now plot
xrange= kwargs.pop('xrange',[self.xmin,self.xmax])
yrange= kwargs.pop('yrange',[self.ymin,self.ymax])
if not kwargs.has_key('colorbar'):
kwargs['colorbar']= True
if not kwargs.has_key('shrink'):
kwargs['shrink']= 0.78
if not kwargs.has_key('vmin'):
kwargs['vmin']= numpy.nanmin(z2d)
if not kwargs.has_key('vmax'):
kwargs['vmax']= numpy.nanmax(z2d)
xlabel= r'$[\mathrm{Fe/H}]$'
ylabel= _AFELABEL
cmap= kwargs.pop('cmap','coolwarm')
out= bovy_plot.bovy_dens2d(z2d.T,origin='lower',cmap=cmap,
interpolation='nearest',
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange,
**kwargs)
if returnz:
return z2d
else:
return out
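# Minimal usage sketch: pixelise the sample in ([Fe/H], [a/Fe]) and loop over the
# resulting mono-abundance bins (comments only, since building the sample reads
# the full catalogue from disk).
#     maps = MAPs(data=get_rgbsample(), dfeh=0.1, dafe=0.05)
#     solar_bin = maps(0., 0.)          # stars in the pixel containing (0, 0)
#     for bin_data in maps.map():       # iterate over all pixels
#         pass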
```
#### File: apogee-maps/py/fitDens.py
```python
import os, os.path
import pickle
import numpy
from scipy import optimize
from galpy.util import bovy_coords
import bovy_mcmc
import acor
import densprofiles
import define_rcsample
def fitDens(data,
locations,effsel,distmods,
type='exp',
mcmc=False,nsamples=10000,
verbose=True,
init=None,
retMaxL=False,
pos_keys = ['<KEY> <KEY> <KEY>']):
"""
NAME:
fitDens
PURPOSE:
fit the density profile of a set of stars
INPUT:
data - recarray with the data
locations - locations of the APOGEE effective selection function
effsel - array (nloc,nD) of the effective selection function, includes area of the field
distmods - grid of distance moduli on which the effective selection function is pre-computed
type= ('exp') type of density profile to fit
mcmc= (False) run MCMC or not
nsamples= (10000) number of MCMC samples to obtain
verbose= (True) set this to False for no optimize convergence messages
init= (None) if set, these are the initial conditions
retMaxL= (False) if True, return the maximum likelihood
pos_keys = the keys in the data table to R, phi and Z
OUTPUT:
(best-fit, samples, maximum-likelihood) based on options
HISTORY:
2015-03-24 - Written - Bovy (IAS)
2017-03-04 - added pos_keys kwarg - Mackereth (ARI)
"""
# Setup the density function and its initial parameters
densfunc= _setup_densfunc(type)
if init is None:
init= _setup_initparams_densfunc(type,data, pos_keys = pos_keys)
# Setup the integration of the effective volume
effsel, Rgrid, phigrid, zgrid= _setup_effvol(locations,effsel,distmods)
# Get the data's R,phi,z
dataR= data[pos_keys[0]]
dataphi= data[pos_keys[1]]
dataz= data[pos_keys[2]]
# Optimize
out= optimize.fmin(lambda x: _mloglike(x,densfunc,type,
dataR,dataphi,dataz,
effsel,Rgrid,phigrid,zgrid),
init,disp=verbose)
if 'explinflare' in type:
step= [0.2,0.2,0.2,0.2,0.02] # need small step for Rfinv
else:
step= 0.2
if mcmc:
samples= bovy_mcmc.markovpy(out,
step,
lambda x: loglike(x,densfunc,type,
dataR,dataphi,dataz,
effsel,Rgrid,
phigrid,zgrid),
(),
isDomainFinite=[[False,False] for ii in range(len(out))],
domain= [[0.,0.] for ii in range(len(out))],
nsamples=nsamples,
nwalkers=2*len(out))
if verbose: print_samples_qa(samples)
out= (out,numpy.array(samples).T,)
else:
out= (out,)
if retMaxL:
out= out+(loglike(out[0],densfunc,type,dataR,dataphi,dataz,
effsel,Rgrid,
phigrid,zgrid),)
return out
def _mloglike(*args,**kwargs):
"""Minus the log likelihood"""
return -loglike(*args,**kwargs)
def loglike(params,densfunc,type,
dataR,dataphi,dataz,
effsel,Rgrid,phigrid,zgrid):
"""
NAME:
loglike
PURPOSE:
compute the log likelihood of the data given a density profile
INPUT:
params - parameters of the density
densfunc - a function that evaluates the density as densfunc(R,phi,z,params=params)
dataR, dataphi, dataz - the cylindrical Galactocentric coordinates of the data
effsel - array (nloc,nD), includes D^2*Delta D factor
Rgrid, phigrid, zgrid - array (nloc,nD) of the cylindrical Galactocentric coordinates corresponding to the (l_loc,b_loc,D) of the effective selection function
OUTPUT:
log likelihood
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
# Check priors
if not _check_range_params_densfunc(params,type):
return -numpy.finfo(numpy.dtype(numpy.float64)).max
#raise NotImplementedError("Need to implement priors")
# Setup the density function
tdensfunc= lambda x,y,z: densfunc(x,y,z,params=params)
# Evaluate the log density at the data
datadens= numpy.log(tdensfunc(dataR,dataphi,dataz))
# Evaluate the effective volume
teffvol= effvol(tdensfunc,effsel,Rgrid,phigrid,zgrid)
# Put it all together
return numpy.sum(datadens)-len(dataR)*numpy.log(teffvol)
def effvol(densfunc,effsel,Rgrid,phigrid,zgrid):
"""
NAME:
effvol
PURPOSE:
calculate the effective volume
INPUT:
densfunc - function that returns the density when called with R,phi,z
effsel - array (nloc,nD), includes D^2*Delta D factor
Rgrid, phigrid, zgrid - array (nloc,nD) of the cylindrical Galactocentric coordinates corresponding to the (l_loc,b_loc,D) of the effective selection function
OUTPUT:
effective volume
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
# Evaluate the density
tdens= densfunc(Rgrid,phigrid,zgrid)
return numpy.sum(effsel*tdens)
############################## DENSITY FUNCTIONS #############################
def _setup_densfunc(type):
"""Return the density function for this type"""
if type.lower() == 'exp':
return densprofiles.expdisk
elif type.lower() == 'expplusconst':
return densprofiles.expdiskplusconst
elif type.lower() == 'twoexp':
return densprofiles.twoexpdisk
elif type.lower() == 'brokenexp':
return densprofiles.brokenexpdisk
elif type.lower() == 'tribrokenexp':
return densprofiles.tribrokenexpdisk
elif type.lower() == 'symbrokenexp':
return densprofiles.symbrokenexpdisk
elif type.lower() == 'brokenexpflare':
return densprofiles.brokenexpflaredisk
elif type.lower() == 'tribrokenexpflare':
return densprofiles.tribrokenexpflaredisk
elif type.lower() == 'tribrokenexpfixedflare':
return densprofiles.tribrokenexpfixedflaredisk
elif type.lower() == 'brokentwoexp':
return densprofiles.brokentwoexpdisk
elif type.lower() == 'brokentwoexpflare':
return densprofiles.brokentwoexpflaredisk
elif type.lower() == 'tribrokentwoexp':
return densprofiles.tribrokentwoexpdisk
elif type.lower() == 'gaussexp':
return densprofiles.gaussexpdisk
elif type.lower() == 'brokenquadexp':
return densprofiles.brokenquadexpdisk
elif type.lower() == 'symbrokenquadexp':
return densprofiles.symbrokenquadexpdisk
elif type.lower() == 'brokenexpfixedspiral':
return densprofiles.brokenexpdiskfixedspiral
elif type.lower() == 'tribrokenexplinflare':
return densprofiles.tribrokenexplinflaredisk
elif type.lower() == 'tribrokenexpinvlinflare':
return densprofiles.tribrokenexpinvlinflaredisk
def _setup_initparams_densfunc(type,data, pos_keys=['<KEY>', '<KEY>', '<KEY>']):
"""Return the initial parameters of the density for this type, might depend on the data"""
if type.lower() == 'exp':
return [1./3.,1./0.3]
elif type.lower() == 'expplusconst':
return [1./3.,1./0.3,numpy.log(0.1)]
elif type.lower() == 'twoexp':
return [1./3.,1./0.3,1./4.,1./0.5,densprofiles.logit(0.5)]
elif type.lower() == 'brokenexp':
return [-1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]]))]
elif type.lower() == 'tribrokenexp':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]]))]
elif type.lower() == 'symbrokenexp':
return [0.4,1./0.3,numpy.log(10.)]
elif type.lower() == 'brokenexpflare':
return [-1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
-1./5.]
elif type.lower() == 'tribrokenexpflare':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
-1./5.]
elif type.lower() == 'tribrokenexpfixedflare':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]]))]
elif type.lower() == 'brokentwoexp':
return [-1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
densprofiles.logit(0.5),1./0.8]
elif type.lower() == 'brokentwoexpflare':
return [-1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
densprofiles.logit(0.5),1./0.8,-0.2]
elif type.lower() == 'tribrokentwoexp':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
densprofiles.logit(0.5),1./0.8]
elif type.lower() == 'gaussexp':
return [1./3.,1./0.3,numpy.log(10.)]
elif type.lower() == 'brokenquadexp':
return [1./3.,1./0.3,1./3.,numpy.log(10.)]
elif type.lower() == 'symbrokenquadexp':
return [1./3.,1./0.3,numpy.log(10.)]
elif type.lower() == 'brokenexpfixedspiral':
return [1./6.,1./0.3,1./2.,numpy.log(14.),numpy.log(1.)]
elif type.lower() == 'tribrokenexplinflare':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
0.]
elif type.lower() == 'tribrokenexpinvlinflare':
return [1./3.,1./0.3,1./3.,numpy.log(numpy.median(data[pos_keys[0]])),
-1./5.]
def _check_range_params_densfunc(params,type):
"""Check that the current parameters are in a reasonable range (prior)"""
# Second parameter is always a scale height, which we don't allow neg.
if params[1] < 0.: return False
if type.lower() == 'exp':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.exp(params[2]) > 1.: return False
elif type.lower() == 'expplusconst':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.exp(params[2]) > 1.: return False
if params[2] < -20.: return False
elif type.lower() == 'twoexp':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.fabs(params[3]) > 20.: return False
elif type.lower() == 'brokenexp':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'tribrokenexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'symbrokenexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.exp(params[2]) > 16.: return False
if numpy.exp(params[2]) < 1.: return False
elif type.lower() == 'brokenexpflare':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'tribrokenexpflare':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'tribrokenexpfixedflare':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'brokentwoexp':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
if params[4] < -7.: return False
if params[4] > 0.: return False #make 2nd less dominant
if params[5] < 0.: return False
if numpy.fabs(params[5]) > 20.: return False
elif type.lower() == 'brokentwoexpflare':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
if params[4] < -7.: return False
if params[4] > 0.: return False #make 2nd less dominant
if params[5] < 0.: return False
if numpy.fabs(params[5]) > 20.: return False
elif type.lower() == 'tribrokentwoexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
if params[4] < -7.: return False
if params[4] > 0.: return False #make 2nd less dominant
if params[5] < 0.: return False
if params[5] > 20.: return False
elif type.lower() == 'gaussexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.exp(params[2]) > 16.: return False
if numpy.exp(params[2]) < 1.: return False
elif type.lower() == 'brokenquadexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'symbrokenquadexp':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.exp(params[2]) > 16.: return False
if numpy.exp(params[2]) < 1.: return False
elif type.lower() == 'brokenexpfixedspiral':
if numpy.fabs(params[0]) > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if numpy.fabs(params[2]) > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'tribrokenexplinflare':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
elif type.lower() == 'tribrokenexpinvlinflare':
if params[0] < 0.: return False
if params[0] > 2.: return False
if numpy.fabs(params[1]) > 20.: return False
if params[2] < 0.: return False
if params[2] > 2.: return False
if numpy.exp(params[3]) > 16.: return False
if numpy.exp(params[3]) < 1.: return False
return True
########################### EFFECTIVE VOLUME SETUP ############################
def _setup_effvol(locations,effsel,distmods):
# First restore the APOGEE selection function (assumed pre-computed)
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
# Now compute the necessary coordinate transformations
ds= 10.**(distmods/5-2.)
Rgrid, phigrid, zgrid= [], [], []
for loc in locations:
lcen, bcen= apo.glonGlat(loc)
XYZ= bovy_coords.lbd_to_XYZ(lcen*numpy.ones_like(ds),
bcen*numpy.ones_like(ds),
ds,
degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rcsample._R0,
Zsun=define_rcsample._Z0)
Rgrid.append(Rphiz[:,0])
phigrid.append(Rphiz[:,1])
zgrid.append(Rphiz[:,2])
Rgrid= numpy.array(Rgrid)
phigrid= numpy.array(phigrid)
zgrid= numpy.array(zgrid)
# Also need to multiply in distance factors
effsel*= numpy.tile(ds**3.*(distmods[1]-distmods[0]),(effsel.shape[0],1))
return (effsel,Rgrid,phigrid,zgrid)
# From vclos before...
def print_samples_qa(samples):
print "Mean, standard devs, acor tau, acor mean, acor s ..."
for kk in range(len(samples[0])):
xs= numpy.array([s[kk] for s in samples])
#Auto-correlation time
tau, m, s= acor.acor(xs)
        print(numpy.mean(xs), numpy.std(xs), tau, m, s)
```
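The likelihood that `loglike` and `effvol` implement above is that of an inhomogeneous Poisson process: the sum of the log density at the observed stars minus the number of stars times the log of the density integrated against the effective selection function. A minimal, self-contained one-dimensional sketch of the same structure (an illustration only, not repository code; the toy exponential density and the uniform selection grid are assumptions):
```python
# Toy version of the fitDens likelihood: ln L = sum_i ln nu(R_i) - N * ln(effvol),
# with effvol = sum over a grid of (effective selection) x nu, as in effvol() above.
import numpy
from scipy import optimize

def toy_loglike(invh, dataR, Rgrid, effsel):
    if invh <= 0.:
        return -1e300                              # crude prior: scale length must be positive
    lndens_data = -invh * dataR                    # ln nu at the data for nu(R) = exp(-R/h)
    teffvol = numpy.sum(effsel * numpy.exp(-invh * Rgrid))
    return numpy.sum(lndens_data) - len(dataR) * numpy.log(teffvol)

numpy.random.seed(1)
true_h = 3.                                        # true scale length of the toy disk
dataR = numpy.random.exponential(true_h, size=2000)
Rgrid = numpy.linspace(0., 30., 301)
effsel = numpy.ones_like(Rgrid) * (Rgrid[1] - Rgrid[0])  # uniform selection times Delta R
bestfit = optimize.fmin(lambda p: -toy_loglike(p[0], dataR, Rgrid, effsel),
                        [0.5], disp=False)
print(1. / bestfit[0])                             # recovered scale length, close to 3
```
The real code does the same in three dimensions, with `effsel` carrying the APOGEE selection fraction, the field area, and the D^2 Delta D volume factor per distance-modulus bin.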
#### File: apogee-maps/py/fitMAPs.py
```python
import sys
import os, os.path
import pickle
import copy
import numpy
from galpy.util import save_pickles
import define_rcsample
import fitDens
_NSAMPLES= 50000
# Globals
locations= None
distmods= None
effsel= None
effsel_mar= None
effsel_drim= None
effsel_sale= None
effsel_zero= None
def fitMAPs(type,savefilename):
setup_selection_function()
if os.path.exists(savefilename):
with open(savefilename,'rb') as savefile:
bf= pickle.load(savefile)
samples= pickle.load(savefile)
bf_g15= pickle.load(savefile)
samples_g15= pickle.load(savefile)
bf_zero= pickle.load(savefile)
samples_zero= pickle.load(savefile)
bii= pickle.load(savefile)
else:
bf= []
samples= []
bf_g15= []
samples_g15= []
bf_zero= []
samples_zero= []
bii= 0
maps= define_rcsample.MAPs()
for ii, map in enumerate(maps.map()):
if ii < bii: continue
tbf, ts= fitmap(map,type=type,dmap='marshall06')
bf.append(tbf)
samples.append(ts)
tbf, ts= fitmap(map,type=type,dmap='green15')
bf_g15.append(tbf)
samples_g15.append(ts)
tbf, ts= fitmap(map,type=type,dmap='zero')
bf_zero.append(tbf)
samples_zero.append(ts)
        print(ii, numpy.median(samples[-1],axis=1))
save_pickles(savefilename,bf,samples,
bf_g15,samples_g15,
bf_zero,samples_zero,
ii+1)
return None
def fitmap(tdata,type='brokenexp',dmap='marshall06'):
tlocations= copy.deepcopy(locations)
tdistmods= copy.deepcopy(distmods)
if dmap == 'green15':
teffsel= copy.deepcopy(effsel)
elif dmap.lower() == 'marshall06':
teffsel= copy.deepcopy(effsel_mar)
elif dmap.lower() == 'zero':
teffsel= copy.deepcopy(effsel_zero)
bf, samples= fitDens.fitDens(tdata,
numpy.array(tlocations),
copy.deepcopy(teffsel),
tdistmods,type=type,verbose=False,
mcmc=True,nsamples=_NSAMPLES)
bf= fitDens.fitDens(tdata,
numpy.array(tlocations),
copy.deepcopy(teffsel),
tdistmods,type=type,verbose=False,
init=numpy.median(samples,axis=1))
return (bf, samples)
def setup_selection_function():
# Green et al.
global locations, distmods, effsel, effsel_mar, effsel_drim, effsel_sale
global effsel_zero
with open('../essf/essf_green15.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel= pickle.load(savefile)
distmods= pickle.load(savefile)
# Marshall et al. (2006)
with open('../essf/essf_marshall06.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel_mar= pickle.load(savefile)
    # Fill in regions not covered by Marshall map
effsel_mar[effsel_mar < -0.5]= effsel[effsel_mar < -0.5]
# Sale et al. (2014)
with open('../essf/essf_sale14.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel_sale= pickle.load(savefile)
    # Fill in regions not covered by Sale map
effsel_sale[effsel_sale < -0.5]= effsel[effsel_sale < -0.5]
# Drimmel et al (2003)
with open('../essf/essf_drimmel03.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel_drim= pickle.load(savefile)
# Zero
with open('../essf/essf_zero.sav','rb') as savefile:
locations= pickle.load(savefile)
effsel_zero= pickle.load(savefile)
return None
if __name__ == '__main__':
fitMAPs(sys.argv[1],sys.argv[2])
```
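One reusable piece of `fitMAPs` is its checkpointing: after every MAP it re-pickles the partial result lists together with the index of the next MAP, so an interrupted run resumes where it left off. A stripped-down sketch of that pattern (an illustration only; plain `pickle` is used here instead of `galpy.util.save_pickles`, and the squaring stands in for the expensive fit):
```python
import os
import pickle

def run_all(items, savefilename):
    # Restore partial results (and the resume index) if a previous run was interrupted
    if os.path.exists(savefilename):
        with open(savefilename, 'rb') as savefile:
            results = pickle.load(savefile)
            next_index = pickle.load(savefile)
    else:
        results, next_index = [], 0
    for ii, item in enumerate(items):
        if ii < next_index:
            continue                              # already done in a previous run
        results.append(item ** 2)                 # stand-in for fitmap(...)
        with open(savefilename, 'wb') as savefile:
            pickle.dump(results, savefile)        # overwrite the checkpoint after each item
            pickle.dump(ii + 1, savefile)         # resume point for the next run
    return results

print(run_all(list(range(5)), 'checkpoint-example.sav'))
```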
#### File: apogee-maps/py/mockDensData.py
```python
import os, os.path
import pickle
import multiprocessing
from optparse import OptionParser
import numpy
from scipy import ndimage
import fitsio
from galpy.util import bovy_coords, multi
import mwdust
import define_rcsample
import fitDens
import densprofiles
dmap= None
dmapg15= None
apo= None
def generate(locations,
type='exp',
sample='lowlow',
extmap='green15',
nls=101,
nmock=1000,
H0=-1.49,
_dmapg15=None,
ncpu=1):
"""
NAME:
generate
PURPOSE:
generate mock data following a given density
INPUT:
locations - locations to be included in the sample
type= ('exp') type of density profile to sample from
sample= ('lowlow') for selecting mock parameters
extmap= ('green15') extinction map to use ('marshall06' and others use Green15 to fill in unobserved regions)
nls= (101) number of longitude bins to use for each field
nmock= (1000) number of mock data points to generate
H0= (-1.49) absolute magnitude (can be array w/ sampling spread)
ncpu= (1) number of cpus to use to compute the probability
OUTPUT:
mockdata recarray with tags 'RC_GALR_H', 'RC_GALPHI_H', 'RC_GALZ_H'
HISTORY:
2015-04-03 - Written - Bovy (IAS)
"""
if isinstance(H0,float): H0= [H0]
# Setup the density function and its initial parameters
rdensfunc= fitDens._setup_densfunc(type)
mockparams= _setup_mockparams_densfunc(type,sample)
densfunc= lambda x,y,z: rdensfunc(x,y,z,params=mockparams)
# Setup the extinction map
global dmap
global dmapg15
if _dmapg15 is None: dmapg15= mwdust.Green15(filter='2MASS H')
else: dmapg15= _dmapg15
if isinstance(extmap,mwdust.DustMap3D.DustMap3D):
dmap= extmap
elif extmap.lower() == 'green15':
dmap= dmapg15
elif extmap.lower() == 'marshall06':
dmap= mwdust.Marshall06(filter='2MASS H')
elif extmap.lower() == 'sale14':
dmap= mwdust.Sale14(filter='2MASS H')
elif extmap.lower() == 'drimmel03':
dmap= mwdust.Drimmel03(filter='2MASS H')
# Use brute-force rejection sampling to make no approximations
# First need to estimate the max probability to use in rejection;
# Loop through all locations and compute sampling probability on grid in
# (l,b,D)
# First restore the APOGEE selection function (assumed pre-computed)
global apo
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
# Now compute the necessary coordinate transformations and evaluate the
# maximum probability
distmods= numpy.linspace(7.,15.5,301)
ds= 10.**(distmods/5-2.)
nbs= nls
lnprobs= numpy.empty((len(locations),len(distmods),nbs,nls))
radii= []
lcens, bcens= [], []
lnprobs= multi.parallel_map(lambda x: _calc_lnprob(locations[x],nls,nbs,
ds,distmods,
H0,
densfunc),
range(len(locations)),
numcores=numpy.amin([len(locations),
multiprocessing.cpu_count(),ncpu]))
lnprobs= numpy.array(lnprobs)
for ll, loc in enumerate(locations):
lcen, bcen= apo.glonGlat(loc)
rad= apo.radius(loc)
radii.append(rad) # save for later
lcens.append(lcen[0])
bcens.append(bcen[0])
maxp= (numpy.exp(numpy.nanmax(lnprobs))-10.**-8.)*1.1 # Just to be sure
# Now generate mock data using rejection sampling
nout= 0
arlocations= numpy.array(locations)
arradii= numpy.array(radii)
arlcens= numpy.array(lcens)
arbcens= numpy.array(bcens)
out= numpy.recarray((nmock,),
dtype=[('RC_DIST_H','f8'),
('RC_DM_H','f8'),
('RC_GALR_H','f8'),
('RC_GALPHI_H','f8'),
('RC_GALZ_H','f8')])
while nout < nmock:
nnew= 2*(nmock-nout)
# nnew new locations
locIndx= numpy.floor(numpy.random.uniform(size=nnew)*len(locations)).astype('int')
newlocations= arlocations[locIndx]
# Point within these locations
newds_coord= numpy.random.uniform(size=nnew)
newds= 10.**((newds_coord*(numpy.amax(distmods)-numpy.amin(distmods))\
+numpy.amin(distmods))/5.-2.)
newdls_coord= numpy.random.uniform(size=nnew)
newdls= newdls_coord*2.*arradii[locIndx]\
-arradii[locIndx]
newdbs_coord= numpy.random.uniform(size=nnew)
newdbs= newdbs_coord*2.*arradii[locIndx]\
-arradii[locIndx]
newr2s= newdls**2.+newdbs**2.
keepIndx= newr2s < arradii[locIndx]**2.
newlocations= newlocations[keepIndx]
newds_coord= newds_coord[keepIndx]
newdls_coord= newdls_coord[keepIndx]
newdbs_coord= newdbs_coord[keepIndx]
newds= newds[keepIndx]
newdls= newdls[keepIndx]
newdbs= newdbs[keepIndx]
newls= newdls+arlcens[locIndx][keepIndx]
newbs= newdbs+arbcens[locIndx][keepIndx]
# Reject?
tps= numpy.zeros_like(newds)
for nloc in list(set(newlocations)):
lindx= newlocations == nloc
pindx= arlocations == nloc
coord= numpy.array([newds_coord[lindx]*(len(distmods)-1.),
newdbs_coord[lindx]*(nbs-1.),
newdls_coord[lindx]*(nls-1.)])
tps[lindx]= \
numpy.exp(ndimage.interpolation.map_coordinates(\
lnprobs[pindx][0],
coord,cval=-10.,
order=1))-10.**-8.
XYZ= bovy_coords.lbd_to_XYZ(newls,newbs,newds,degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rcsample._R0,
Ysun=0.,
Zsun=define_rcsample._Z0)
testp= numpy.random.uniform(size=len(newds))*maxp
keepIndx= tps > testp
if numpy.sum(keepIndx) > nmock-nout:
rangeIndx= numpy.zeros(len(keepIndx),dtype='int')
rangeIndx[keepIndx]= numpy.arange(numpy.sum(keepIndx))
keepIndx*= (rangeIndx < nmock-nout)
out['RC_DIST_H'][nout:nout+numpy.sum(keepIndx)]= newds[keepIndx]
out['RC_DM_H'][nout:nout+numpy.sum(keepIndx)]= newds_coord[keepIndx]*(numpy.amax(distmods)-numpy.amin(distmods))\
+numpy.amin(distmods)
out['RC_GALR_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[0][keepIndx]
out['RC_GALPHI_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[1][keepIndx]
out['RC_GALZ_H'][nout:nout+numpy.sum(keepIndx)]= Rphiz[2][keepIndx]
nout= nout+numpy.sum(keepIndx)
return (out,lnprobs)
def _setup_mockparams_densfunc(type,sample):
"""Return the parameters of the mock density for this type"""
if type.lower() == 'exp':
if sample.lower() == 'lowlow':
return [0.,1./0.3]
elif sample.lower() == 'solar':
return [1./3.,1./0.3]
else:
return [1./3.,1./0.3]
elif type.lower() == 'expplusconst':
if sample.lower() == 'lowlow':
return [0.,1./0.3,numpy.log(0.1)]
else:
return [1./3.,1./0.3,numpy.log(0.1)]
elif type.lower() == 'twoexp':
return [1./3.,1./0.3,1./4.,1./0.5,densprofiles.logit(0.5)]
elif type.lower() == 'brokenexp':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.)]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.)]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.)]
elif type.lower() == 'brokenexpflare':
if sample.lower() == 'lowlow':
return [-0.2,1./.3,0.2,numpy.log(11.),-0.1]
elif sample.lower() == 'solar':
return [-1./6.,1./0.3,1./2.,numpy.log(8.),-0.1]
else:
return [-1./6.,1./0.3,1./2.,numpy.log(6.),-0.1]
elif type.lower() == 'gaussexp':
if sample.lower() == 'lowlow':
return [.4,1./0.3,numpy.log(11.)]
else:
return [1./3.,1./0.3,numpy.log(10.)]
def _calc_lnprob(loc,nls,nbs,ds,distmods,H0,densfunc):
lcen, bcen= apo.glonGlat(loc)
rad= apo.radius(loc)
ls= numpy.linspace(lcen-rad,lcen+rad,nls)
bs= numpy.linspace(bcen-rad,bcen+rad,nbs)
# Tile these
tls= numpy.tile(ls,(len(ds),len(bs),1))
tbs= numpy.swapaxes(numpy.tile(bs,(len(ds),len(ls),1)),1,2)
tds= numpy.tile(ds,(len(ls),len(bs),1)).T
XYZ= bovy_coords.lbd_to_XYZ(tls.flatten(),
tbs.flatten(),
tds.flatten(),
degree=True)
Rphiz= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],XYZ[:,1],XYZ[:,2],
Xsun=define_rcsample._R0,
Ysun=0.,
Zsun=define_rcsample._Z0)
# Evaluate probability density
tH= numpy.tile(distmods.T,(1,len(ls),len(bs),1))[0].T
for ii in range(tH.shape[1]):
for jj in range(tH.shape[2]):
try:
tH[:,ii,jj]+= dmap(ls[jj],bs[ii],ds)
except (IndexError, TypeError,ValueError):
try:
tH[:,ii,jj]+= dmapg15(ls[jj],bs[ii],ds)
except IndexError: # assume zero outside
pass
tH= tH.flatten()+H0[0]
ps= densfunc(Rphiz[0],Rphiz[1],Rphiz[2])*apo(loc,tH)\
*numpy.fabs(numpy.cos(tbs.flatten()/180.*numpy.pi))\
*tds.flatten()**3.
return numpy.log(numpy.reshape(ps,(len(distmods),nbs,nls))\
+10.**-8.)
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the mock data will be saved to"
parser = OptionParser(usage=usage)
parser.add_option("--type",dest='type',default='exp',
help="Type of density profile")
parser.add_option("--sample",dest='sample',default='lowlow',
help="Sample parameter for mock parameters")
parser.add_option("--H0",dest='H0',default=-1.49,type='float',
help="RC absolute magnitude")
parser.add_option("--nls",dest='nls',default=101,type='int',
help="Number of longitudes to bin each field in")
parser.add_option("--nmock",dest='nmock',default=20000,type='int',
help="Number of mock samples to generate")
# Dust map to use
parser.add_option("--extmap",dest='extmap',default='green15',
help="Dust map to use ('Green15', 'Marshall03', 'Drimmel03', 'Sale14', or 'zero'")
# Multiprocessing?
parser.add_option("-m","--multi",dest='multi',default=1,type='int',
help="number of cpus to use")
return parser
if __name__ == '__main__':
parser= get_options()
options, args= parser.parse_args()
data= define_rcsample.get_rcsample()
locations= list(set(list(data['LOCATION_ID'])))
#locations= [4240,4242]
out= generate(locations,
type=options.type,
sample=options.sample,
extmap=options.extmap,
nls=options.nls,
nmock=options.nmock,
H0=options.H0,
ncpu=options.multi)
fitsio.write(args[0],out[0],clobber=True)
```
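The core of `generate` is brute-force rejection sampling: candidates are drawn uniformly over the observed volume and kept with probability p(candidate)/maxp, where maxp bounds the target density. The same idea in one dimension (a self-contained illustration, not repository code; the exponential target and the [0, 10] window are assumptions):
```python
import numpy

def target(x):
    return numpy.exp(-x / 3.)              # un-normalized target density

numpy.random.seed(2)
maxp = 1.                                  # upper bound on the target over [0, 10]
nmock, nout = 10000, 0
samples = numpy.empty(nmock)
while nout < nmock:
    nnew = 2 * (nmock - nout)              # propose more than needed, as in generate()
    cand = numpy.random.uniform(0., 10., size=nnew)
    keep = numpy.random.uniform(size=nnew) * maxp < target(cand)
    kept = cand[keep][:nmock - nout]       # do not overshoot the requested number
    samples[nout:nout + len(kept)] = kept
    nout += len(kept)
print(numpy.mean(samples))                 # ~2.6: mean of an exponential (scale 3) truncated at 10
```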
#### File: apogee-maps/py/plot_ah_location.py
```python
import os, os.path
import sys
import pickle
import numpy
import matplotlib
matplotlib.use('Agg')
from galpy.util import save_pickles, bovy_plot
from matplotlib import rc, pyplot
import mwdust
import apogee.select.apogeeSelect
from define_rcsample import get_rcsample
_PLOTDIST= True
_LW= 1.5
def plot_ah_location(location,plotname):
# Setup selection function
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
else:
# Setup selection function
apo= apogee.select.apogeeSelect()
# Delete these because they're big and we don't need them
del apo._specdata
del apo._photdata
save_pickles(selectFile,apo)
glon, glat= apo.glonGlat(location)
glon= glon[0]
glat= glat[0]
ahFile= '../savs/ah-%i.sav' % location
if not os.path.exists(ahFile):
# Distances at which to calculate the extinction
distmods= numpy.linspace(7.,15.5,301)
ds= 10.**(distmods/5-2.)
# Setup Green et al. (2015) dust map
gd= mwdust.Green15(filter='2MASS H')
pa, ah= gd.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_default= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
stdah_default= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_default**2.)
# Marshall et al. (2006)
marshall= mwdust.Marshall06(filter='2MASS H')
try:
pa, ah= marshall.dust_vals_disk(glon,glat,ds,apo.radius(location))
except IndexError:
meanah_marshall= -numpy.ones_like(ds)
stdah_marshall= -numpy.ones_like(ds)
else:
meanah_marshall= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
axis=0)/numpy.sum(pa)
stdah_marshall= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_marshall**2.)
if True:
# Drimmel et al. (2003)
drimmel= mwdust.Drimmel03(filter='2MASS H')
pa, ah= drimmel.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_drimmel= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,axis=0)/numpy.sum(pa)
stdah_drimmel= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_drimmel**2.)
else:
meanah_drimmel= -numpy.ones_like(ds)
stdah_drimmel= -numpy.ones_like(ds)
if True:
# Sale et al. (2014)
sale= mwdust.Sale14(filter='2MASS H')
try:
pa, ah= sale.dust_vals_disk(glon,glat,ds,apo.radius(location))
meanah_sale= numpy.sum(numpy.tile(pa,(len(ds),1)).T*ah,
axis=0)/numpy.sum(pa)
except (TypeError,ValueError):
meanah_sale= -numpy.ones_like(ds)
stdah_sale= -numpy.ones_like(ds)
else:
stdah_sale= numpy.sqrt(numpy.sum(numpy.tile(pa,(len(ds),1)).T\
*ah**2.,axis=0)\
/numpy.sum(pa)-meanah_sale**2.)
else:
meanah_sale= -numpy.ones_like(ds)
stdah_sale= -numpy.ones_like(ds)
save_pickles(ahFile,distmods,meanah_default,stdah_default,
meanah_marshall,stdah_marshall,
meanah_drimmel,stdah_drimmel,
meanah_sale,stdah_sale)
else:
with open(ahFile,'rb') as savefile:
distmods= pickle.load(savefile)
meanah_default= pickle.load(savefile)
stdah_default= pickle.load(savefile)
meanah_marshall= pickle.load(savefile)
stdah_marshall= pickle.load(savefile)
meanah_drimmel= pickle.load(savefile)
stdah_drimmel= pickle.load(savefile)
meanah_sale= pickle.load(savefile)
stdah_sale= pickle.load(savefile)
# Now plot
bovy_plot.bovy_print(fig_height=3.)
if _PLOTDIST:
distmods= 10.**(distmods/5-2.)
xrange= [0.,12.]
xlabel=r'$D\,(\mathrm{kpc})$'
else:
        xrange=[7.,15.8]
xlabel=r'$\mathrm{distance\ modulus}\ \mu$'
ylabel=r'$A_H$'
yrange= [0.,1.2*numpy.amax(numpy.vstack((meanah_default+stdah_default,
meanah_marshall+stdah_marshall,
meanah_drimmel+stdah_drimmel,
meanah_sale+stdah_sale)))]
line_default= bovy_plot.bovy_plot(distmods,meanah_default,
'b-',lw=_LW,zorder=12,
xrange=xrange,
xlabel=xlabel,
yrange=yrange,
ylabel=ylabel)
pyplot.fill_between(distmods,
meanah_default-stdah_default,
meanah_default+stdah_default,
hatch='/',facecolor=(0,0,0,0),
color='b',lw=0.25,zorder=4)
line_marshall= bovy_plot.bovy_plot(distmods,meanah_marshall,'r-',lw=_LW,
overplot=True,
zorder=8)
pyplot.fill_between(distmods,
meanah_marshall-stdah_marshall,
meanah_marshall+stdah_marshall,
hatch='\\',facecolor=(0,0,0,0),
color='r',lw=0.25,zorder=2)
line_drimmel= bovy_plot.bovy_plot(distmods,meanah_drimmel,'-',lw=_LW,
color='gold',
overplot=True,
zorder=7)
pyplot.fill_between(distmods,
meanah_drimmel-stdah_drimmel,
meanah_drimmel+stdah_drimmel,
hatch='///',facecolor=(0,0,0,0),
color='gold',lw=0.25,zorder=1)
line_sale= bovy_plot.bovy_plot(distmods,meanah_sale,'-',lw=_LW,
color='c',
overplot=True,
zorder=9)
pyplot.fill_between(distmods,
meanah_sale-stdah_sale,
meanah_sale+stdah_sale,
hatch='//',facecolor=(0,0,0,0),
color='c',lw=0.25,zorder=3)
if True:
data= get_rcsample()
data= data[data['LOCATION_ID'] == location]
bovy_plot.bovy_plot(data['RC_DIST'],data['AK_TARG']*1.55,
'ko',zorder=20,overplot=True,ms=2.)
if location == 4318:
pyplot.legend((line_default[0],line_sale[0]),
(r'$\mathrm{Green\ et\ al.\ (2015)}$',
r'$\mathrm{Sale\ et\ al.\ (2014)}$'),
loc='lower right',#bbox_to_anchor=(.91,.375),
numpoints=8,
prop={'size':14},
frameon=False)
elif location == 4242:
pyplot.legend((line_marshall[0],line_drimmel[0]),
(r'$\mathrm{Marshall\ et\ al.\ (2006)}$',
r'$\mathrm{Drimmel\ et\ al.\ (2003)}$'),
loc='lower right',#bbox_to_anchor=(.91,.375),
numpoints=8,
prop={'size':14},
frameon=False)
# Label
lcen, bcen= apo.glonGlat(location)
if numpy.fabs(bcen) < 0.1: bcen= 0.
bovy_plot.bovy_text(r'$(l,b) = (%.1f,%.1f)$' % (lcen,bcen),
top_right=True,size=16.)
bovy_plot.bovy_end_print(plotname,dpi=300,
bbox_extra_artists=pyplot.gca().get_children(),
bbox_inches='tight')
return None
if __name__ == '__main__':
#4240 is 30,0
plot_ah_location(int(sys.argv[1]),sys.argv[2])
```
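The quantity plotted above is, at each distance, the mean and spread of A_H over the pixels of a field, weighted by the per-pixel areas `pa` returned by `dust_vals_disk`. A self-contained sketch of just that weighted-moment calculation (the `pa` and `ah` values below are made up for illustration and assume `ah` has shape (number of pixels, number of distances), as the array algebra above implies):
```python
import numpy

numpy.random.seed(3)
npix, ndist = 4, 5
pa = numpy.array([1., 2., 1., 0.5])                    # per-pixel weights (areas) in the field
ah = numpy.random.uniform(0., 1., size=(npix, ndist))  # A_H per pixel and per distance
weights = numpy.tile(pa, (ndist, 1)).T                 # shape (npix, ndist), as in the plot code
meanah = numpy.sum(weights * ah, axis=0) / numpy.sum(pa)
stdah = numpy.sqrt(numpy.sum(weights * ah**2., axis=0) / numpy.sum(pa) - meanah**2.)
print(meanah, stdah)                                   # one mean and one spread per distance
```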
#### File: apogee-maps/py/plot_broadsurfdens.py
```python
import sys
import pickle
import numpy
import matplotlib
matplotlib.use('Agg')
from galpy.util import bovy_plot
from matplotlib import pyplot
import densprofiles
_SKIP= 10
_SIGNIF= 0.025
def plot_broadsurfdens(plotname):
broads= ['lowlow','solar','highfeh','highalpha']
#anorms= [2.,.25,0.01,50.]
anorms= [0.004,.20,5.,50.]
bovy_plot.bovy_print(fig_width=8.,fig_height=3.)
overplot= False
for ii, broad in enumerate(broads):
# Restore the fits
savename= '../broadfits/%s.sav' % broad
with open(savename,'rb') as savefile:
bf_exp= pickle.load(savefile)
bf_brexp= pickle.load(savefile)
bf_twoexp= pickle.load(savefile)
ml_exp= pickle.load(savefile)
ml_brexp= pickle.load(savefile)
ml_twoexp= pickle.load(savefile)
samples_exp= pickle.load(savefile)
samples_brexp= pickle.load(savefile)
samples_twoexp= pickle.load(savefile)
# Create all density profiles
Rs= numpy.linspace(4.,14.,1001)
if broad.lower() == 'highalpha':
samples= samples_exp[:,::_SKIP]
nsamples= len(samples[0])
tRs= numpy.tile(Rs,(nsamples,1)).T
ihRin= numpy.tile(samples[0],(len(Rs),1))
ldp= -ihRin*(tRs-densprofiles._R0)
else:
samples= samples_brexp[:,::_SKIP]
nsamples= len(samples[0])
tRs= numpy.tile(Rs,(nsamples,1)).T
ldp= numpy.empty((len(Rs),nsamples))
Rb= numpy.tile(numpy.exp(samples[3]),(len(Rs),1))
ihRin= numpy.tile(samples[0],(len(Rs),1))
ihRout= numpy.tile(samples[2],(len(Rs),1))
# Rb >= R0
leRb= (tRs <= Rb)*(Rb >= densprofiles._R0)
ldp[leRb]= ihRin[leRb]*(tRs[leRb]-densprofiles._R0)
gtRb= (tRs > Rb)*(Rb >= densprofiles._R0)
ldp[gtRb]= -ihRout[gtRb]*(tRs[gtRb]-densprofiles._R0)\
+ihRout[gtRb]*(Rb[gtRb]-densprofiles._R0)\
+ihRin[gtRb]*(Rb[gtRb]-densprofiles._R0)
# Rb < R0
leRb= (tRs <= Rb)*(Rb < densprofiles._R0)
ldp[leRb]= ihRin[leRb]*(tRs[leRb]-densprofiles._R0)\
-ihRout[leRb]*(Rb[leRb]-densprofiles._R0)\
-ihRin[leRb]*(Rb[leRb]-densprofiles._R0)
gtRb= (tRs > Rb)*(Rb < densprofiles._R0)
ldp[gtRb]= -ihRout[gtRb]*(tRs[gtRb]-densprofiles._R0)
norm= numpy.exp(numpy.median(ldp,axis=1))[numpy.argmin(numpy.fabs(Rs-densprofiles._R0))]/anorms[ii]
bovy_plot.bovy_plot(Rs,numpy.exp(numpy.median(ldp,axis=1))/norm,
'k-',
lw=2.,overplot=overplot,
xlabel=r'$R\,(\mathrm{kpc})$',
ylabel=r'$\Sigma(R)\times\mathrm{constant}$',
xrange=[0.,16.],
yrange=[0.0003,900.],
semilogy=True)
pyplot.fill_between(Rs,
numpy.exp(numpy.sort(ldp,axis=1)[:,int(round(_SIGNIF*nsamples))])/norm,
numpy.exp(numpy.sort(ldp,axis=1)[:,int(round((1.-_SIGNIF)*nsamples))])/norm,
color='0.65',
lw=0.)
overplot= True
# Label
labelx= 1.
bovy_plot.bovy_text(labelx,5.,r'$\mathrm{high\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(labelx,0.085,r'$\mathrm{solar}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(labelx,0.001,r'$\mathrm{low\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(labelx,150.,r'$\mathrm{high}\ [\alpha/\mathrm{Fe}]$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_end_print(plotname)
if __name__ == '__main__':
plot_broadsurfdens(sys.argv[1])
```
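Both this plot and `plot_mapsurfdens.py` below rebuild the broken-exponential surface density piecewise in log space: rising with inverse scale length `hin_inv` inside the break radius, falling with `hout_inv` outside, joined continuously at the break and normalized to ln Sigma = 0 at R0. A compact, self-contained version of that construction (an illustrative rewrite, not repository code; `densprofiles._R0` is replaced by an explicit `R0` argument):
```python
import numpy

def broken_exp_lnsurfdens(R, hin_inv, hout_inv, Rb, R0=8.):
    """ln Sigma(R) for a broken exponential, continuous at Rb and zero at R = R0."""
    R = numpy.atleast_1d(R).astype(float)
    out = numpy.empty_like(R)
    inner = R <= Rb
    if Rb >= R0:
        # R0 lies on the inner (rising) branch
        out[inner] = hin_inv * (R[inner] - R0)
        out[~inner] = -hout_inv * (R[~inner] - Rb) + hin_inv * (Rb - R0)
    else:
        # R0 lies on the outer (falling) branch
        out[inner] = hin_inv * (R[inner] - Rb) - hout_inv * (Rb - R0)
        out[~inner] = -hout_inv * (R[~inner] - R0)
    return out

print(broken_exp_lnsurfdens(numpy.array([4., 8., 10., 12.]), 0.3, 1. / 2., 10.))
# [-1.2, 0., 0.6, -0.4]: rises to the break at R = 10, then declines
```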
#### File: apogee-maps/py/plot_mapsurfdens.py
```python
import os, os.path
import sys
import csv
import pickle
import numpy
import matplotlib
matplotlib.use('Agg')
from galpy.util import bovy_plot
from matplotlib import pyplot, cm
import densprofiles
import define_rcsample
_SKIP= 10
_SIGNIF= 0.025
def plot_mapsurfdens(plotname):
with open('../mapfits/tribrokenexpflare.sav','rb') as savefile:
bf= numpy.array(pickle.load(savefile))
samples_brexp= numpy.array(pickle.load(savefile))
plotmaps= [9,16,23,29,36,43,50,57,64,71]
bovy_plot.bovy_print(fig_width=8.,fig_height=6.)
maps= define_rcsample.MAPs()
cmap= cm.coolwarm
overplot= False
Rs= numpy.linspace(4.,14.,1001)
# Setup for saving the profiles
csvfile= open(os.path.join('..','out','mapsurfdens.csv'),'w')
writer= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_NONE)
writer.writerow(['# Surface density profile for MAPs (Figure 11 in Bovy et al. 2016)'])
writer.writerow(['# The first line lists the radii at which the surface density profiles'])
writer.writerow(['# are evaluated'])
writer.writerow(['# The rest of the file are the log surface profiles; the 0.025'])
    writer.writerow(['# lower limit and the 0.975 upper limit (each 1 line)'])
writer.writerow(['# Different MAPs are separated by a comment line'])
writer.writerow(['{:.2f}'.format(x) for x in Rs])
for ii, map in enumerate(maps.map()):
if not ii in plotmaps: continue
# Create all density profiles
samples= samples_brexp[ii,:,::_SKIP]
nsamples= len(samples[0])
tRs= numpy.tile(Rs,(nsamples,1)).T
ldp= numpy.empty((len(Rs),nsamples))
Rb= numpy.tile(numpy.exp(samples[3]),(len(Rs),1))
ihRin= numpy.tile(samples[0],(len(Rs),1))
ihRout= numpy.tile(samples[2],(len(Rs),1))
# Rb >= R0
leRb= (tRs <= Rb)*(Rb >= densprofiles._R0)
ldp[leRb]= ihRin[leRb]*(tRs[leRb]-densprofiles._R0)
gtRb= (tRs > Rb)*(Rb >= densprofiles._R0)
ldp[gtRb]= -ihRout[gtRb]*(tRs[gtRb]-densprofiles._R0)\
+ihRout[gtRb]*(Rb[gtRb]-densprofiles._R0)\
+ihRin[gtRb]*(Rb[gtRb]-densprofiles._R0)
# Rb < R0, normalize outer at R0
leRb= (tRs <= Rb)*(Rb < densprofiles._R0)
ldp[leRb]= ihRin[leRb]*(tRs[leRb]-densprofiles._R0)\
-ihRout[leRb]*(Rb[leRb]-densprofiles._R0)\
-ihRin[leRb]*(Rb[leRb]-densprofiles._R0)
gtRb= (tRs > Rb)*(Rb < densprofiles._R0)
ldp[gtRb]= -ihRout[gtRb]*(tRs[gtRb]-densprofiles._R0)
# Label and relative normalization
tfeh= round(numpy.median(map['FE_H'])*20.)/20.
if tfeh == 0.25: tfeh= 0.3
if tfeh == -0.0: tfeh= 0.0
if tfeh == -0.1: tfeh= -0.1
        print(ii, tfeh, len(map))
anorm= 10**(-10.*(tfeh+0.1))
if tfeh > 0.2: anorm= 10**(-12.*(tfeh+0.1))
if tfeh < -0.5: anorm= 10**(-12.*(tfeh+0.1))
anorm= 1./anorm # re-order
norm= numpy.exp(numpy.median(ldp,axis=1))[numpy.argmin(numpy.fabs(Rs-densprofiles._R0))]/anorm
bovy_plot.bovy_plot(Rs,numpy.exp(numpy.median(ldp,axis=1))/norm,
'-',
color=cmap((tfeh+0.6)*0.95/0.9+0.05),
lw=2.,overplot=overplot,
xlabel=r'$R\,(\mathrm{kpc})$',
ylabel=r'$\Sigma(R)\times\mathrm{constant}$',
xrange=[0.,16.],
yrange=[0.000000001,9000000.],
semilogy=True)
pyplot.fill_between(Rs,
numpy.exp(numpy.sort(ldp,axis=1)[:,int(round(_SIGNIF*nsamples))])/norm,
numpy.exp(numpy.sort(ldp,axis=1)[:,int(round((1.-_SIGNIF)*nsamples))])/norm,
color=cmap((tfeh+0.6)),
lw=0.)
overplot= True
if ii == 9:
bovy_plot.bovy_text(2.,
10.**6.,
r'$[\mathrm{Fe/H}]$',size=16.,color='k')
bovy_plot.bovy_text(2.,(numpy.exp(numpy.median(ldp,axis=1))/norm)[0],
r'$%+.1f$' % tfeh,size=16.,
color=cmap((tfeh+0.6)*0.95/0.9+0.05))
writer.writerow(['# Low-alpha MAP w/ [Fe/H]=%g' % tfeh])
writer.writerow(['{:.3f}'.format(x) for x in list(numpy.median(ldp,axis=1))])
writer.writerow(['{:.3f}'.format(x) for x in list(numpy.sort(ldp,axis=1)[:,int(round(_SIGNIF*nsamples))])])
writer.writerow(['{:.3f}'.format(x) for x in list(numpy.sort(ldp,axis=1)[:,int(round((1.-_SIGNIF)*nsamples))])])
csvfile.close()
bovy_plot.bovy_text(10.,10.**6.,
r'$\mathrm{low-}[\alpha/\mathrm{Fe}]\ \mathrm{MAPs}$',
size=16.)
bovy_plot.bovy_end_print(plotname)
if __name__ == '__main__':
plot_mapsurfdens(sys.argv[1])
``` |
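The CSV written above has a simple layout: comment lines start with '#', the first data row lists the radii, and each plotted MAP then contributes three rows (median, 2.5% and 97.5% log surface-density profiles). A hedged sketch of a reader for that file (not part of the repo; the file name is the one used above):
```python
import csv
import numpy

def read_mapsurfdens(filename='../out/mapsurfdens.csv'):
    rows = []
    with open(filename, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            if not row or row[0].startswith('#'):
                continue                                    # skip comment lines
            rows.append(numpy.array(row, dtype=float))
    Rs = rows[0]                                            # radii of the grid
    profiles = [(rows[ii], rows[ii + 1], rows[ii + 2])      # (median, lower, upper) per MAP
                for ii in range(1, len(rows), 3)]
    return Rs, profiles
```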
{
"source": "jobovy/bovy_mcmc",
"score": 3
} |
#### File: bovy_mcmc/bovy_mcmc/_bovy_mcmc.py
```python
import scipy as sc
import scipy.stats as stats
##############################################################################
#
# bovy_mcmc.py: general mcmc methods
#
##############################################################################
def hmc(initial_theta,nleap,stepsize,obj_func,grad_func,func_params,nsamples=1):
"""
NAME:
hmc
PURPOSE:
general HMC routine (uses the standard kinetic energy)
INPUT:
initial_theta - initial state of the parameters
nleap - (int) number of leapfrog steps per HMC step
stepsize - (double) size of the steps to take in the orbit integration
obj_func - (function pointer) the objective function E(x,params) as in p(x) ~ exp(-E)
grad_func - (function pointer) the gradient of the objective function gradE(x,params)
func_params - (tuple) the parameters of the objective function
nsamples - (int) desired number of HMC samples
OUTPUT:
(a set of samples,acceptance fraction)
BUGS:
- does not use masses
- only uses the last sample
REVISION HISTORY:
2009-10-08 - Written - Bovy (NYU)
2009-10-29 - Rewritten and added to bovy_mcmc.py
"""
out=[]
try:
ntheta= len(initial_theta)
except TypeError:
ntheta= 1
E= obj_func(initial_theta,func_params)
grad= grad_func(initial_theta,func_params)
theta= initial_theta.copy()
naccept= 0.
for ii in range(nsamples):
p= stats.norm.rvs(size=ntheta)
H= 0.5*sc.dot(p,p) + E
newtheta= theta.copy()
newgrad= grad
#First move the momentum
p-= 0.5*newgrad*stepsize
for kk in range(nleap):
newtheta+= stepsize*p
newgrad= grad_func(newtheta,func_params)
p-= stepsize*newgrad/(1.+ (kk == (nleap-1)))#Full steps except for the last one
Enew= obj_func(newtheta,func_params)
Hnew= 0.5*sc.dot(p,p)+Enew
dH= Hnew - H
dH= dH * ( dH > 0 )
#Metropolis accept
if stats.uniform.rvs() < sc.exp(-dH):
theta= newtheta.copy()
E= Enew
grad= newgrad
naccept+= 1.
out.append(theta)
if nsamples == 1:
return (out[0],naccept)
else:
return (out,naccept/nsamples)
def metropolis(initial_theta,sample_proposal,eval_ln_proposal,
proposal_params,lnpdf,pdf_params,symmetric=False,
nsamples=1,callback=None):
"""
NAME:
metropolis
PURPOSE:
metropolis mcmc
INPUT:
initial_theta - initial sample
sample_proposal - given x and proposal_params, sample a proposal
using this function
eval_ln_proposal - given x and proposal_params, evaluate the log of
the proposal density
proposal_params - parameters for the proposal function
(e.g., typical steps)
lnpdf - function evaluating the log of the pdf to be sampled
pdf_params - parameters to pass to the pdf (tuple)
symmetric - (bool) if True, the proposal distribution is symmetric and will not be evaluated
nsamples - number of samples desired
callback - function of parameter to be called after each new sample
OUTPUT:
tuple consisting of
list of samples, number if nsamples=1
acceptance ratio, 1 or 0 if nsamples=1
REVISION HISTORY:
2009-10-30 - Written - Bovy (NYU)
2011-06-18 - Added doctest - Bovy
DOCTEST:
>>> import numpy as nu
>>> nu.random.seed(1)
>>> import scipy as sc
>>> from scipy import stats
>>> def lngaussian(x,mean,var):
... return -.5*sc.log(2.*sc.pi*var)-0.5*(x-mean)**2./var
>>> def sample_gaussian_proposal(mean,stddev):
... return stats.norm.rvs()*stddev+mean
>>> def eval_ln_gaussian_proposal(new,old,stddev):
... return -0.5*sc.log(2.*sc.pi*stddev**2.)-0.5*(old-new)**2./stddev**2.
>>> lnpdf= lngaussian
>>> pdf_params= (0.,1.)
>>> sample_proposal= sample_gaussian_proposal
>>> eval_ln_proposal= eval_ln_gaussian_proposal
>>> proposal_params= (2.,)
>>> symmetric=False
>>> initial_theta= 5.
>>> nsamples= 200000
>>> (samples,faccept)= metropolis(initial_theta,sample_proposal,eval_ln_proposal,proposal_params,lnpdf,pdf_params,symmetric=symmetric,nsamples=nsamples)
>>> print "%4.1f%% of the samples were accepted" % (100.*faccept)
50.0% of the samples were accepted
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-0.)**2. < 10.**(logprecision*2.)
>>> assert (nu.std(samples)-1.)**2. < 10.**(logprecision*2.)
>>> assert (stats.moment(samples,3)-0.)**2. < 10.**(logprecision)
>>> assert (stats.moment(samples,4)-stats.norm.moment(4))**2. < 10.**(logprecision)
>>> from scipy import special
>>> def lnbeta(x,a,b):
... return (a-1.)*nu.log(x)+(b-1.)*nu.log(1-x)-special.betaln(a,b)
>>> def sample_beta_proposal(x):
... return nu.random.uniform()
>>> def eval_ln_beta_proposal(new,old):
... return 0.
>>> lnpdf= lnbeta
>>> pdf_params= (.5,.5)
>>> sample_proposal= sample_beta_proposal
>>> eval_ln_proposal= eval_ln_beta_proposal
>>> proposal_params= ()
>>> symmetric=False
>>> initial_theta= 0.5
>>> nsamples= 100000
>>> nu.random.seed(1)
>>> (samples,faccept)= metropolis(initial_theta,sample_proposal,eval_ln_proposal,proposal_params,lnpdf,pdf_params,symmetric=symmetric,nsamples=nsamples)
>>> print "%4.1f%% of the samples were accepted" % (100.*faccept)
72.5% of the samples were accepted
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-stats.beta.moment(1,pdf_params[0],pdf_params[1]))**2. < 10.**(logprecision*2.)
>>> assert (nu.var(samples)-stats.beta.moment(2,pdf_params[0],pdf_params[1])+stats.beta.moment(1,pdf_params[0],pdf_params[1])**2.)**2. < 10.**(logprecision*2.)
"""
out= []
naccept= 0.
theta= initial_theta
logp= lnpdf(theta,*pdf_params)
for ii in range(nsamples):
newtheta= sample_proposal(theta,*proposal_params)
newlogp= lnpdf(newtheta,*pdf_params)
if symmetric:
extra_proposal_factor= 0.
else:
fromoldtonew= eval_ln_proposal(newtheta,theta,*proposal_params)
fromnewtoold= eval_ln_proposal(theta,newtheta,*proposal_params)
extra_proposal_factor= fromnewtoold-fromoldtonew
u=stats.uniform.rvs()
comp= newlogp-logp+extra_proposal_factor
comp*= (comp < 0)
if sc.log(u) < comp:
theta= newtheta
logp= newlogp
naccept+= 1.
if not callback is None: callback(theta)
out.append(theta)
if nsamples == 1:
return (out[0],naccept)
else:
return (out,naccept/nsamples)
def slice_double(x,u,step,lnpdf,pdf_params,isDomainFinite,domain):
"""
NAME:
slice_double
PURPOSE:
doubling technique to create the interval in slice sampling (Neal 2003)
INPUT:
x - current sample
u - current (log) height of the slice
step - step to take in stepping out
lnpdf - function evaluating the log of the pdf
pdf_params - parameters to be passed to the pdf
isDomainFinite - is the domain finite? [bool,bool]
domain - the domain if it is finite (has no effect if the domain is not finite)
OUTPUT:
(xl,xr) - lower and upper bound to the interval
REVISION HISTORY:
2009-10-29 - Written - Bovy (NYU)
"""
r= stats.uniform.rvs()
xl= x-r*step
xr= x+(1-r)*step
logpxl= lnpdf(xl,*pdf_params)
logpxr= lnpdf(xr,*pdf_params)
while logpxl > u or logpxr > u:
v= stats.uniform.rvs()
if v < .5:
xl-= (xr-xl)
if isDomainFinite[0] and xl < domain[0]:
xl= domain[0]
logpxl= u-1
else:
logpxl= lnpdf(xl,*pdf_params)
else:
xr+= (xr-xl)
if isDomainFinite[1] and xr > domain[1]:
xr= domain[1]
logpxr= u-1
else:
logpxr= lnpdf(xr,*pdf_params)
else:
if isDomainFinite[0] and xl < domain[0]:
xl= domain[0]
if isDomainFinite[1] and xr > domain[1]:
xr= domain[1]
return (xl,xr)
def slice_step_out(x,u,step,lnpdf,pdf_params,isDomainFinite,domain):
"""
NAME:
slice_step_out
PURPOSE:
stepping out technique to create the interval in slice sampling (Mackay 2003)
INPUT:
x - current sample
u - current (log) height of the slice
step - step to take in stepping out
lnpdf - function evaluating the log of the pdf
pdf_params - parameters to be passed to the pdf
isDomainFinite - is the domain finite? [bool,bool]
domain - the domain if it is finite (has no effect if the domain is not finite)
OUTPUT:
(xl,xr) - lower and upper bound to the interval
REVISION HISTORY:
2009-10-29 - Written - Bovy (NYU)
"""
r= stats.uniform.rvs()
xl= x-r*step
xr= x+(1-r)*step
while lnpdf(xl,*pdf_params) > u:
xl-= step
if isDomainFinite[0] and xl < domain[0]:
xl= domain[0]
break
else:
if isDomainFinite[0] and xl < domain[0]:
xl= domain[0]
while lnpdf(xr,*pdf_params) > u:
xr+= step
if isDomainFinite[1] and xr > domain[1]:
xr= domain[1]
break
else:
if isDomainFinite[1] and xr > domain[1]:
xr= domain[1]
return (xl,xr)
def slice_whole(x,u,step,lnpdf,pdf_params,isDomainFinite,domain):
"""
NAME:
slice_whole
PURPOSE:
create the interval in slice sampling by using the whole, finite domain
INPUT:
x - current sample
u - current (log) height of the slice
step - step to take in stepping out
lnpdf - function evaluating the log of the pdf
pdf_params - parameters to be passed to the pdf
isDomainFinite - is the domain finite? [bool,bool]
domain - the domain if it is finite (has no effect if the domain is not finite)
OUTPUT:
(xl,xr) - lower and upper bound to the interval
REVISION HISTORY:
2009-11-03 - Written - Bovy (NYU)
"""
return (domain[0],domain[1])
def slice_shrink(xp,x,interval):
"""
NAME:
slice_shrink
PURPOSE:
shrink the interval in slice sampling (Mackay 2003)
INPUT:
xp - proposed sample
x - current sample
interval - the current interval
OUTPUT:
new interval
REVISION HISTORY:
2009-10-29 - Written - Bovy (NYU)
"""
if xp > x:
xr= xp
xl= interval[0]
else:
xl= xp
xr= interval[1]
return (xl,xr)
def slice(initial_theta,step,lnpdf,pdf_params,create_method='step_out',isDomainFinite=[False,False],domain=[0.,0.],
nsamples=1,callback=None):
"""
NAME:
slice
PURPOSE:
simple slice sampling function (e.g., Neal 2003,Mackay 2003)
INPUT:
initial_theta - initial sample
step - stepping out step w
lnpdf - function evaluating the log of the pdf to be sampled
pdf_params - parameters to pass to the pdf (tuple)
create_method - 'step_out', 'double', or 'whole' (whole only works if the domain is finite; defaults to 'double')
nsamples - number of samples desired
isDomainFinite - is the domain finite? [bool,bool]
domain - the domain if it is finite (has no effect if the domain is not finite)
callback - function of parameter to be called after each new sample
OUTPUT:
list of samples, number if nsamples=1
REVISION HISTORY:
2009-10-29 - Written - Bovy (NYU)
DOCTEST:
>>> import numpy as nu
>>> nu.random.seed(1)
>>> def lngaussian(x,mean,var):
... return -.5*nu.log(2.*nu.pi*var)-0.5*(x-mean)**2./var
>>> pdf_params= (0.,1.)
>>> isDomainFinite= [False,False]
>>> domain= [0.,0.]
>>> create_method= 'double'
>>> nsamples= 100000
>>> samples= slice(0.1,1.,lngaussian,pdf_params,create_method,isDomainFinite,domain,nsamples=nsamples)
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-0.)**2. < 10.**(logprecision*2.)
>>> assert (nu.std(samples)-1.)**2. < 10.**(logprecision*2.)
>>> from scipy import stats
>>> assert (stats.moment(samples,3)-0.)**2. < 10.**(logprecision)
>>> assert (stats.moment(samples,4)-stats.norm.moment(4))**2. < 10.**(logprecision)
>>> from scipy import special
>>> def lnbeta(x,a,b):
... return (a-1.)*nu.log(x)+(b-1.)*nu.log(1-x)-special.betaln(a,b)
>>> pdf_params= (.5,.5)
>>> isDomainFinite= [True,True]
>>> domain= [0.,1.]
>>> create_method= 'double'
>>> samples= slice(0.1,1.,lnbeta,pdf_params,create_method,isDomainFinite,domain,nsamples=nsamples)
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-stats.beta.moment(1,pdf_params[0],pdf_params[1]))**2. < 10.**(logprecision*2.)
>>> assert (nu.var(samples)-stats.beta.moment(2,pdf_params[0],pdf_params[1])+stats.beta.moment(1,pdf_params[0],pdf_params[1])**2.)**2. < 10.**(logprecision*2.)
>>> create_method= 'step_out'
>>> samples= slice(0.1,1.,lnbeta,pdf_params,create_method,isDomainFinite,domain,nsamples=nsamples)
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-stats.beta.moment(1,pdf_params[0],pdf_params[1]))**2. < 10.**(logprecision*2.)
>>> assert (nu.var(samples)-stats.beta.moment(2,pdf_params[0],pdf_params[1])+stats.beta.moment(1,pdf_params[0],pdf_params[1])**2.)**2. < 10.**(logprecision*2.)
>>> create_method= 'whole'
>>> samples= slice(0.1,1.,lnbeta,pdf_params,create_method,isDomainFinite,domain,nsamples=nsamples)
    >>> samples= samples[nsamples//2:-1] #discard burn-in
>>> logprecision= -2.
>>> assert (nu.mean(samples)-stats.beta.moment(1,pdf_params[0],pdf_params[1]))**2. < 10.**(logprecision*2.)
>>> assert (nu.var(samples)-stats.beta.moment(2,pdf_params[0],pdf_params[1])+stats.beta.moment(1,pdf_params[0],pdf_params[1])**2.)**2. < 10.**(logprecision*2.)
"""
if create_method == 'step_out':
create_interval= slice_step_out
accept= slice_step_out_accept
elif create_method == 'double':
create_interval= slice_double
accept= slice_double_accept
else:
if isDomainFinite[0] and isDomainFinite[1]:
create_interval= slice_whole
accept= slice_step_out_accept
else:
create_interval= slice_double
accept= slice_double_accept
modify_interval= slice_shrink
x= initial_theta
logp= lnpdf(x,*pdf_params)
out= []
for ii in range(nsamples):
u= logp-stats.expon.rvs()#p.712 in Neal (2003)
(xl,xr)= create_interval(x,u,step,lnpdf,pdf_params,isDomainFinite,domain)
while True:
xp= stats.uniform.rvs()*(xr-xl)+xl
logpxp= lnpdf(xp,*pdf_params)
if logpxp >= u and accept(xp,x,u,step,(xl,xr),lnpdf,pdf_params):#Equal sign from Neal comment on Gelman blog
break
(xl,xr)= modify_interval(xp,x,(xl,xr))
if not callback is None: callback(xp)
out.append(xp)
x=xp
logp= logpxp
if nsamples == 1:
return out[0]
else:
return out
def slice_double_accept(xp,x,u,step,interval,lnpdf,pdf_params):
"""
NAME:
slice_double_accept
PURPOSE:
accept a step when using the doubling procedure
INPUT:
xp - proposed point
x - current point
u - log of the height of the slice
step - step parameter w
interval - (xl,xr)
lnpdf - function that evaluates the log of the pdf
pdf_params - parameters of the pdf
OUTPUT:
Whether to accept or not (Neal 2003)
BUGS:
Not as efficient as possible with lnpdf evaluations
HISTORY:
2009-10-30 - Written - Bovy (NYU)
"""
(xl,xr) = interval
d= False
acceptable= True
while xr-xl > 1.1*step:
m= (xl+xr)*.5
if (x < m and xp >= m) or (x >= m and xp < m):
d= True
if xp < m:
xr= m
else:
xl= m
if d and lnpdf(xl,*pdf_params) <= u and lnpdf(xr,*pdf_params) <= u:
acceptable= False
break
return acceptable
def slice_step_out_accept(xp,x,u,step,interval,lnpdf,pdf_params):
"""
NAME:
slice_step_out_accept
PURPOSE:
accept a step when using the stepping out procedure
INPUT:
xp - proposed point
x - current point
u - log of the height of the slice
step - step parameter w
interval - (xl,xr)
lnpdf - function that evaluates the log of the pdf
pdf_params - parameters of the pdf
OUTPUT:
True
HISTORY:
2009-10-30 - Written - Bovy (NYU)
"""
return True
if __name__ == '__main__':
import doctest
doctest.testmod()
```
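`metropolis` and `slice` above carry doctests; `hmc` does not, so here is a hedged usage sketch (not part of the module) sampling a two-dimensional standard Gaussian, for which E(x) = |x|^2/2 and grad E = x. The import path follows the file name above and is an assumption about how the package is laid out:
```python
import numpy
from bovy_mcmc._bovy_mcmc import hmc   # assumed import path, matching the file above

def gauss_E(x, params):
    return 0.5 * numpy.sum(x**2.)      # p(x) ~ exp(-E) is a standard Gaussian

def gauss_gradE(x, params):
    return x

numpy.random.seed(4)
samples, faccept = hmc(numpy.array([3., -3.]),    # initial_theta (must support .copy())
                       20, 0.1,                   # nleap leapfrog steps of size 0.1
                       gauss_E, gauss_gradE, (),  # objective, gradient, their parameters
                       nsamples=5000)
samples = numpy.array(samples[2500:])             # discard burn-in
print(faccept, numpy.mean(samples, axis=0), numpy.std(samples, axis=0))
```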
#### File: bovy_mcmc/bovy_mcmc/elliptical_slice.py
```python
import math
import numpy
def elliptical_slice(initial_theta,prior,lnpdf,pdf_params=(),
cur_lnpdf=None,angle_range=None):
"""
NAME:
elliptical_slice
PURPOSE:
Markov chain update for a distribution with a Gaussian "prior" factored out
INPUT:
initial_theta - initial vector
prior - cholesky decomposition of the covariance matrix
(like what numpy.linalg.cholesky returns),
or a sample from the prior
lnpdf - function evaluating the log of the pdf to be sampled
pdf_params= parameters to pass to the pdf
cur_lnpdf= value of lnpdf at initial_theta (optional)
angle_range= Default 0: explore whole ellipse with break point at
first rejection. Set in (0,2*pi] to explore a bracket of
the specified width centred uniformly at random.
OUTPUT:
new_theta, new_lnpdf
HISTORY:
Originally written in matlab by <NAME> (http://homepages.inf.ed.ac.uk/imurray2/pub/10ess/elliptical_slice.m)
2012-02-24 - Written - Bovy (IAS)
"""
D= len(initial_theta)
if cur_lnpdf is None:
cur_lnpdf= lnpdf(initial_theta,*pdf_params)
# Set up the ellipse and the slice threshold
if len(prior.shape) == 1: #prior = prior sample
nu= prior
else: #prior = cholesky decomp
if not prior.shape[0] == D or not prior.shape[1] == D:
raise IOError("Prior must be given by a D-element sample or DxD chol(Sigma)")
nu= numpy.dot(prior,numpy.random.normal(size=D))
hh = math.log(numpy.random.uniform()) + cur_lnpdf
# Set up a bracket of angles and pick a first proposal.
# "phi = (theta'-theta)" is a change in angle.
if angle_range is None or angle_range == 0.:
# Bracket whole ellipse with both edges at first proposed point
phi= numpy.random.uniform()*2.*math.pi
phi_min= phi-2.*math.pi
phi_max= phi
else:
# Randomly center bracket on current point
phi_min= -angle_range*numpy.random.uniform()
phi_max= phi_min + angle_range
phi= numpy.random.uniform()*(phi_max-phi_min)+phi_min
# Slice sampling loop
while True:
# Compute xx for proposed angle difference and check if it's on the slice
xx_prop = initial_theta*math.cos(phi) + nu*math.sin(phi)
cur_lnpdf = lnpdf(xx_prop,*pdf_params)
if cur_lnpdf > hh:
# New point is on slice, ** EXIT LOOP **
break
# Shrink slice to rejected point
if phi > 0:
phi_max = phi
elif phi < 0:
phi_min = phi
else:
raise RuntimeError('BUG DETECTED: Shrunk to current position and still not acceptable.')
# Propose new angle difference
phi = numpy.random.uniform()*(phi_max - phi_min) + phi_min
return (xx_prop,cur_lnpdf)
``` |
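A hedged usage sketch (not part of the module) for the update above: draw from a posterior whose prior is N(0, Sigma) and whose likelihood is Gaussian, by iterating `elliptical_slice` and re-using the returned log-pdf value. The import path mirrors the file name above and is an assumption:
```python
import numpy
from bovy_mcmc.elliptical_slice import elliptical_slice   # assumed import path

def lnlike(theta, obs, obs_var):
    # Gaussian likelihood only; the N(0, Sigma) prior is handled by the sampler
    # through the Cholesky factor passed as 'prior'
    return -0.5 * numpy.sum((theta - obs)**2.) / obs_var

numpy.random.seed(5)
obs = numpy.array([1., -1., 0.5])
chol = numpy.linalg.cholesky(numpy.eye(3))    # prior covariance = identity
theta, cur_lnpdf = numpy.zeros(3), None
samples = []
for ii in range(5000):
    theta, cur_lnpdf = elliptical_slice(theta, chol, lnlike,
                                        pdf_params=(obs, 1.),
                                        cur_lnpdf=cur_lnpdf)
    samples.append(theta)
print(numpy.mean(numpy.array(samples[2500:]), axis=0))   # ~obs/2 for unit prior and likelihood variances
```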
{
"source": "jobovy/flexce",
"score": 2
} |
#### File: flexce/flexCE/utils.py
```python
from __future__ import print_function, division, absolute_import
import os
import sys
from pathlib import PurePath
import numpy as np
from yields import Yields
def define_mass_bins(low=0.1, high=100, dm_low=0.1, dm_high=1.):
"""Define stellar mass bins.
Args:
low (float): low end of lowest mass bin. Defaults to 0.1 Msun.
high (float): high end of higher mass bin. Defaults to 100 Msun.
dm_low (float): mass bin size below 8 Msun. Defaults to 0.1 Msun.
dm_high (float): mass bin size above 8 Msun. Defaults to 1 Msun.
Returns:
array: stellar mass bins
"""
mbins = np.concatenate((np.arange(low, 8., dm_low),
np.arange(8., high + 0.001, dm_high)))
return mbins
def load_yields(path, args, mass_bins):
"""Load yield grids.
Args:
path (str): data directory.
args (dict):
mass_bins (array): stellar mass bins.
Returns:
Yields instance
"""
try:
yld = Yields(path, snii_dir=args['snii_dir'],
agb_dir=args['agb_dir'], snia_dir=args['snia_dir'],
rprocess_dir=args['rprocess_dir'],
sprocess_dir=args['sprocess_dir'],
snia_model=args['snia_model'],
r_elements=args['r_elements'],
s_elements=args['s_elements'],
mbins=mass_bins)
except IOError as e:
print()
print(e)
# FIX: automatically run make_yield_grids.py
print('\nPlease create yield grids with')
print('python make_yield_grids.py\n')
sys.exit(1)
return yld
def set_path(path_in, default_path):
if os.path.isfile(path_in):
path = os.path.dirname(os.path.abspath(path_in))
filename = os.path.basename(path_in)
else:
path = default_path
filename = path_in
return filename, path
def substitute_dir_in_path(path, olddir, newdir):
pp = PurePath(path)
parts = [p if p != olddir else newdir for p in pp.parts]
return os.path.join(*parts)
def none_to_empty_dict(x):
"""If a variable is None, return an empty dictionary."""
if x is None:
x = {}
return x
``` |
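For reference, a quick illustration (not from the package) of the grid `define_mass_bins` produces with its defaults: 0.1 Msun bins from 0.1 to 8 Msun and 1 Msun bins from 8 to 100 Msun:
```python
import numpy as np

mbins = np.concatenate((np.arange(0.1, 8., 0.1),
                        np.arange(8., 100.001, 1.)))
print(len(mbins), mbins[0], mbins[-1])   # 172 edges running from 0.1 to 100.0
```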
{
"source": "jobovy/isodist",
"score": 2
} |
#### File: isodist/isodist/AnIsochrone.py
```python
import os, os.path
import csv
import math
import numpy
import gzip
from isodist.Isochrone import Isochrone, FEH2Z, Z2FEH, dict2recarray
from isodist.PadovaIsochrone import _DATADIR
_ANZSOLAR= 0.0176
_ZS= [-0.1,-0.2,-0.3,-0.5,-1.,-1.5,-2.,-3.,0.,0.1,0.2,0.4]
class AnIsochrone (Isochrone):
"""Class that represents a An+08 isochrone"""
def __init__(self,Z=None,filters=None,corrected=True):
"""
NAME:
__init__
PURPOSE:
initialize
INPUT:
corrected= if False, use un-corrected isochrones
Z= load only this metallicity (can be list)
OUTPUT:
HISTORY:
2011-08-05 - Written - Bovy (NYU)
BUGS:
Z determination needs to account for dY/dZ
"""
self._filters= ['u','g','r','i','z']
#Read the files
dicts= []
if Z is None: #Z here is actually FeH, we correct this later
ZS= _ZS
else:
if isinstance(Z,(list,numpy.ndarray)):
ZS= Z
else:
ZS= [Z]
for Zm in ZS:
if Zm >= 0.: signstr= 'p'
else: signstr= 'm'
if corrected: corrstr= 'corr'
else: corrstr= 'marcs'
dicts.append(read_an_isochrone(os.path.join(_DATADIR,
'an_isochrones',
signstr+'%03i_' % (int(numpy.fabs(100.*Zm)))
+corrstr+'.txt'),
filters=self._filters))
self._ZS= numpy.array([FEH2Z(z,zsolar=_ANZSOLAR) for z in ZS])
self._dicts= dicts
#Gather ages
self._logages= numpy.array(sorted(list(set(self._dicts[0]['logage']))))
return None
def __call__(self,logage,Z=None,feh=None,afe=None,maxm=None,
asrecarray=False,stage=None):
"""
NAME:
__call__
PURPOSE:
get a single isochrone from the library
INPUT:
logage - log_10 age
           Z= or feh= metallicity (use Z_\odot=0.0176)
afe= None (not supported for An; linear relation between afe and
feh is assumed)
maxm= maximum mass to consider (m_ini)
stage= if set, only show this evolutionary stage (NOT IMPLEMENTED FOR AN)
KEYWORDS:
asrecarray= if True, return recarray, otherwise dict
OUTPUT:
isochrone
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
if not afe is None:
            raise NotImplementedError("'afe=' not implemented for An isochrones")
if not feh is None:
Z= 10.**(feh+math.log10(_ANZSOLAR))
indx= (self._ZS == Z)
ii= 0
while (ii < len(self._dicts) and not indx[ii]): ii+= 1
if ii == len(self._dicts):
raise IOError("No isochrone found that matches this metallicity")
thisDict= self._dicts[ii]
if maxm is None:
indx= (thisDict['logage'] == logage)
else:
indx= (thisDict['logage'] == logage)*(thisDict['Mass'] < maxm)
if numpy.sum(indx) == 0:
raise IOError("No isochrone found that matches this logage")
outDict= {}
for key in thisDict.keys():
outDict[key]= thisDict[key][indx]
if asrecarray:
return dict2recarray(outDict)
else:
return outDict
def read_an_isochrone(name,filters=None):
"""
NAME:
read_an_isochrone
PURPOSE:
read an An isochrone file
INPUT:
name- name of the file
filters= list of filters in the file
OUTPUT:
dictionary with the table
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
dialect= csv.excel
dialect.skipinitialspace=True
if name[-2:] == 'gz':
file= gzip.open(name,'r')
else:
file= open(name,'r')
reader= csv.reader(file,delimiter=' ',
dialect=dialect)
logage=[]
Mass= []
logL= []
logTe= []
logg= []
mbol= []
mags= []
for row in reader:
try:
if row[0][0:4] == 'Mass': #Header line to skip
continue
except IndexError:
pass
try:
if row[0] == 'Cluster': #Header line to extract age from
thislogage= numpy.log10(float(row[4]))
continue
except IndexError:
pass
logage.append(thislogage) #from the header, see above
Mass.append(float(row[0]))
logTe.append(numpy.log10(float(row[1])))
logL.append(float(row[2]))
logg.append(float(row[3]))
mbol.append(float(row[4]))
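        # Columns 5-9 tabulate r and the colours g-r, g-i, g-z, and u-g;
        # the ugriz magnitudes are reconstructed from these below.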
r= float(row[5])
gr = float(row[6])
gi = float(row[7])
gz = float(row[8])
ug = float(row[9])
mags.append([r+gr+ug, #u
r+gr, #g
r,
-gi+gr+r, #i
-gz+gr+r]) #z
#Load everything into a dictionary
outDict= {}
outDict['logage']= numpy.array(logage)
outDict['Mass']= numpy.array(Mass)
outDict['logL']= numpy.array(logL)
outDict['logTe']= numpy.array(logTe)
outDict['logg']= numpy.array(logg)
outDict['mbol']= numpy.array(mbol)
for ii in range(len(filters)):
thismag= []
for jj in range(len(mags)):
thismag.append(mags[jj][ii])
outDict[filters[ii]]= numpy.array(thismag)
return outDict
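# Usage sketch (illustrative; assumes the An+08 grid files are installed under
# _DATADIR/an_isochrones and that log10(age)=9.0 is one of the tabulated ages):
#   iso= AnIsochrone(Z=0.)   # 'Z' is really [Fe/H] here, see __init__ above
#   p= iso(9.0,feh=0.)       # dict with 'Mass', 'logTe', 'u', ..., 'z'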
```
#### File: isodist/isodist/_isodist.py
```python
import numpy as nu
import scipy
_SCIPY_VERSION= [int(v.split('rc')[0])
for v in scipy.__version__.split('.')]
if _SCIPY_VERSION[0] < 1 and _SCIPY_VERSION[1] < 10:
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION[0] < 1 and _SCIPY_VERSION[1] < 19:
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from isodist.Isochrone import Isochrone
from isodist.PadovaIsochrone import PadovaIsochrone
_LOGTOLN= 1./nu.log10(nu.exp(1.))
def eval_distpdf(ds,mdict=None,mivardict=None,logg=None,logg_ivar=None,
teff=None,teff_ivar=None,logage=None,logage_ivar=None,
Z=None,Z_ivar=None,feh=None,feh_ivar=None,
afe=None,afe_ivar=None,
padova=None,padova_type=None,
normalize=False,
ageprior=None):
"""
NAME:
eval_distpdf
PURPOSE:
evaluate the distance PDF for an object
INPUT:
ds- list or ndarray of distance (or a single distance), in kpc
mdict= dictionary of apparent magnitudes (e.g., {'J':12.,'Ks':13.})
mivardict= dictionary of magnitude inverse variances (matched to mdict)
logg= observed logg
logg_ivar= inverse variance of logg measurement
teff= observed T_eff [K]
       teff_ivar= inverse variance of T_eff measurement
logage= observed log_10 age [Gyr]
logage_ivar= inverse variance of log_10 age measurement
Z= observed metallicity
Z_ivar= inverse variance of Z measurement
feh= observed metallicity (alternative to Z)
feh_ivar= inverse variance of FeH measurement
afe= observed [\alpha/Fe]
afe_ivar= [\alpha/Fe] inverse variance
padova= if True, use Padova isochrones,
               if set to a PadovaIsochrone object, use this one
padova_type= type of PadovaIsochrone to use (e.g., 2mass-spitzer-wise)
normalize= if True, normalize output PDF (default: False)
ageprior= - None: flat in log age
- flat: flat in age
OUTPUT:
log of probability
HISTORY:
2011-04-28 - Written - Bovy (NYU)
"""
#load isochrones
if not padova is None and isinstance(padova,PadovaIsochrone):
iso= padova
elif not padova is None and isinstance(padova,bool) and padova:
iso= PadovaIsochrone(type=padova_type)
#Parse metallicity info
if not feh is None: raise NotImplementedError("'feh' not yet implemented")
#set up output
if isinstance(ds,(list,nu.ndarray)):
scalarOut= False
if isinstance(ds,list):
_ds= nu.array(ds)
else: _ds= ds
elif isinstance(ds,float):
scalarOut= True
_ds= [ds]
#Pre-calculate all absolute magnitudes
absmagdict= {}
for key in mdict.keys():
absmagdict[key]= -_distmodulus(_ds)+mdict[key]
#loop through isochrones
ZS= iso.Zs()
logages= iso.logages()
allout= nu.zeros((len(_ds),len(ZS),len(logages)))
for zz in range(len(ZS)):
for aa in range(len(logages)):
thisiso= iso(logages[aa],Z=ZS[zz])
dmpm= nu.roll(thisiso['M_ini'],-1)-thisiso['M_ini']
loglike= nu.zeros((len(_ds),len(thisiso['M_ini'])-1))
loglike-= nu.log(thisiso['M_ini'][-1])
for ii in range(1,len(thisiso['M_ini'])-1):
if dmpm[ii] > 0.:
loglike[:,ii]+= nu.log(dmpm[ii])
else:
loglike[:,ii]= nu.finfo(nu.dtype(nu.float64)).min
continue #no use in continuing here
if not teff is None:
loglike[:,ii]-= (teff-10**thisiso['logTe'][ii])**2.*teff_ivar
if not logg is None:
loglike[:,ii]-= (logg-thisiso['logg'][ii])**2.*logg_ivar
for key in mdict.keys():
#print absmagdict[key][2], thisiso[key][ii]
loglike[:,ii]-= (absmagdict[key]-thisiso[key][ii])**2.\
*mivardict[key]
#marginalize over mass
for jj in range(len(_ds)):
allout[jj,zz,aa]= logsumexp(loglike[jj,:])
#add age constraint and prior
if not logage is None:
allout[:,zz,aa]+= -(logage-logages[aa])**2.*logage_ivar
if not ageprior is None:
if isinstance(ageprior,str) and ageprior.lower() == 'flat':
allout[:,zz,aa]+= logages[aa]*_LOGTOLN
#add Z constraint and prior
if not Z is None:
allout[:,zz,:]+= -(Z-ZS[zz])**2.*Z_ivar
#prepare final output
out= nu.zeros(len(_ds))
for jj in range(len(_ds)):
out[jj]= logsumexp(allout[jj,:,:])
if normalize and not scalarOut:
out-= logsumexp(out)+nu.log(ds[1]-ds[0])
#return
if scalarOut: return out[0]
else: return out
def _distmodulus(d):
return 5.*nu.log10(d/.01)
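# Usage sketch (illustrative magnitudes and inverse variances, not real data;
# assumes a Padova grid of the requested type is available). _distmodulus above
# takes d in kpc, i.e., 5 log10(d / 10 pc):
#   ds= nu.linspace(0.5,5.,101)                # distances in kpc
#   lnpdf= eval_distpdf(ds,mdict={'J':12.,'Ks':11.5},
#                       mivardict={'J':25.,'Ks':25.},
#                       teff=4800.,teff_ivar=1e-4,
#                       padova=True,padova_type='2mass-spitzer-wise',
#                       normalize=True)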
``` |
{
"source": "jobovy/mwhalo-shape-2016",
"score": 2
} |
#### File: mwhalo-shape-2016/py/analyze_pal5.py
```python
import os, os.path
import pickle
import csv
from optparse import OptionParser
import subprocess
import numpy
import pal5_util
import MWPotential2014Likelihood
_DATADIR= os.getenv('DATADIR')
def get_options():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
# Potential parameters
parser.add_option("--bf_b15",action="store_true",
dest="bf_b15",default=False,
help="If set, use the best-fit to the MWPotential2014 data")
parser.add_option("--seed",dest='seed',default=1,type='int',
help="seed for potential parameter selection and everything else")
parser.add_option("-i",dest='pindx',default=None,type='int',
help="Index into the potential samples to consider")
parser.add_option("--ro",dest='ro',default=pal5_util._REFR0,type='float',
help="Distance to the Galactic center in kpc")
parser.add_option("--vo",dest='vo',default=pal5_util._REFV0,type='float',
help="Circular velocity at ro in km/s")
parser.add_option("--samples_savefilename",
dest='samples_savefilename',
default='mwpot14varyc-samples.pkl',
help="Name of the file that contains the potential samples")
# c grid
parser.add_option("--cmin",dest='cmin',default=0.5,type='float',
help="Minimum c to consider")
parser.add_option("--cmax",dest='cmax',default=1.5,type='float',
help="Maximum c to consider")
parser.add_option("--cstep",dest='cstep',default=0.01,type='float',
help="C resolution")
# Heuristic guess of the maximum
parser.add_option("--alongbfpm",action="store_true",
dest="alongbfpm",default=False,
help="If set, move along the best-fit distance in the heuristic guess")
# Distances grid
parser.add_option("--dmin",dest='dmin',default=21.,type='float',
help="Minimum distance to consider in guess")
parser.add_option("--dmax",dest='dmax',default=25.,type='float',
help="Maximum distance to consider in guess")
parser.add_option("--dstep",dest='dstep',default=0.02,type='float',
help="Distance resolution")
parser.add_option("--dgridmin",dest='dgridmin',default=-6,type='int',
help="Minimum distance to consider in the final grid, in units of dstep from guess")
parser.add_option("--dgridmax",dest='dgridmax',default=7,type='int',
help="Maximum distance to consider in the final grid, in units of dstep from guess")
# PM offsets grid
parser.add_option("--pmmin",dest='pmmin',default=-0.02,type='float',
help="Minimum proper motion offset to consider in guess")
parser.add_option("--pmmax",dest='pmmax',default=0.02,type='float',
help="Maximum proper motion offset to consider in guess")
parser.add_option("--pmstep",dest='pmstep',default=0.001,type='float',
help="Proper-motion offset resolution")
parser.add_option("--pmgridmin",dest='pmgridmin',default=-3,type='int',
help="Minimum proper motion offset to consider in the final grid, in units of pmstep from guess")
parser.add_option("--pmgridmax",dest='pmgridmax',default=4,type='int',
help="Maximum proper motion offset to consider in the final grid, in units of pmstep from guess")
# Multi-processing
parser.add_option("-m",dest='multi',default=8,type='int',
help="Number of CPUs to use for streamdf setup")
# Increase the uncertainties?
parser.add_option("--emult",dest='emult',default=1.,type='float',
help="If set, multiply the uncertainties by this factor")
# Output file
parser.add_option("-o",dest='outfilename',
default=None,
help="Name of the file that will hold the output")
return parser
def filelen(filename):
p= subprocess.Popen(['wc','-l',filename],stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result,err = p.communicate()
if p.returncode != 0:
raise IOError(err)
return int(result.strip().split()[0])
def load_samples(options):
if os.path.exists(options.samples_savefilename):
with open(options.samples_savefilename,'rb') as savefile:
s= pickle.load(savefile)
else:
raise IOError("File %s that is supposed to hold the potential samples does not exist" % options.samples_savefilename)
return s
def analyze_one_model(options,cs,pot_params,dist,pmra,pmdec):
# First just compute the interpolation points, to adjust the width and length
interpcs=[0.5,0.75,0.875,1.,1.125,1.25,1.5,1.75]#,2.,2.25]
pal5varyc= pal5_util.predict_pal5obs(pot_params,interpcs,
dist=dist,pmra=pmra,pmdec=pmdec,
ro=options.ro,vo=options.vo,
interpk=1,
multi=options.multi,interpcs=interpcs)
if len(pal5varyc[6]) == 0:
out= numpy.zeros((len(cs),5))-1000000000000000.
return out
sigv=0.4*18./pal5varyc[4]
td=5.*25./pal5varyc[5]/(sigv/0.4)
td[td > 14.]= 14. # don't allow older than 14 Gyr
pal5varyc_like= pal5_util.predict_pal5obs(pot_params,cs,
dist=dist,pmra=pmra,pmdec=pmdec,
ro=options.ro,vo=options.vo,
multi=options.multi,
interpk=1,
interpcs=interpcs,
sigv=sigv,td=td)
pos_radec, rvel_ra= pal5_util.pal5_data()
pos_radec[:,2]*= options.emult
rvel_ra[:,2]*= options.emult
return pal5_util.pal5_lnlike(pos_radec,rvel_ra,*pal5varyc_like)
if __name__ == '__main__':
parser= get_options()
options,args= parser.parse_args()
# Set random seed
numpy.random.seed(options.seed)
# Load potential parameters
if options.bf_b15:
pot_params= [0.60122692,0.36273147,-0.97591502,-3.34169377,
0.71877924,-0.01519337,-0.01928001]
else:
pot_samples= load_samples(options)
rndindx= numpy.random.permutation(pot_samples.shape[1])[options.pindx]
pot_params= pot_samples[:,rndindx]
    print(pot_params)
if os.path.exists(options.outfilename):
# Read the distance/proper motion grid from the file
with open(options.outfilename,'rb') as savefile:
line1= savefile.readline()
cline= savefile.readline()
dline= savefile.readline()
dsline= savefile.readline()
pmsline= savefile.readline()
ds= numpy.array([float(d) for d in dsline[1:].split(',')])
pms= numpy.array([float(pm) for pm in pmsline[1:].split(',')])
else:
# Find good distances and proper motions
pot= MWPotential2014Likelihood.setup_potential(pot_params,1.,
False,False,
options.ro,options.vo)
dpmguess= pal5_util.pal5_dpmguess(pot,alongbfpm=options.alongbfpm,
pmmin=options.pmmin,
pmmax=options.pmmax,
pmstep=options.pmstep,
dmin=options.dmin,dmax=options.dmax,
vo=options.vo,ro=options.ro)
bestd= dpmguess[0]
bestpm= dpmguess[1]
        print("Good distance and proper motion are: %.2f, %.2f"
              % (bestd,bestpm))
# March along the maximum line if alongbfpm
ds= []
pms= []
for ii in range(options.dgridmin,options.dgridmax):
for jj in range(options.pmgridmin,options.pmgridmax):
ds.append(bestd+ii*options.dstep)
pms.append(bestpm+(ds[-1]-bestd)*0.099+jj*options.pmstep)
pmdecpar= 2.257/2.296
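    # pmdecpar ties the pmdec offset to the pmra offset using the nominal
    # Pal 5 proper motion (pmra,pmdec)= (-2.296,-2.257) mas/yr adopted below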
# Setup c grid
cs= numpy.arange(options.cmin,options.cmax+options.cstep/2.,options.cstep)
if cs[-1] > options.cmax+options.cstep/2.: cs= cs[:-1]
# Output
if os.path.exists(options.outfilename):
# Figure out how many ds were already run from the length of the file
flen= filelen(options.outfilename)
start_lines= 5
line_per_dist= 5
ii= (flen-start_lines)//line_per_dist
outfile= open(options.outfilename,'a')
else:
# Setup the file
outfile= open(options.outfilename,'w')
outfile.write('# potparams:%.8f,%.8f,%.8f,%.8f,%.8f\n' % \
(pot_params[0],pot_params[1],pot_params[2],
pot_params[3],pot_params[4]))
outfile.write('# cmin cmax cstep: %.3f,%.3f,%.3f\n' % \
(options.cmin,options.cmax,options.cstep))
outfile.write('# dmin dmax dstep: %.3f,%.3f,%.3f\n' % \
(numpy.amin(ds),numpy.amax(ds),options.dstep))
nextline= '#'
for ii,d in enumerate(ds):
nextline+= '%.4f' % d
if not ii == len(ds)-1: nextline+= ','
outfile.write(nextline+'\n')
nextline= '#'
for ii,pm in enumerate(pms):
nextline+= '%.4f' % pm
if not ii == len(pms)-1: nextline+= ','
outfile.write(nextline+'\n')
outfile.flush()
ii= 0
outwriter= csv.writer(outfile,delimiter=',')
# Analyze each distance, pmra, pmdec
while ii < len(ds):
dist= ds[ii]
pmo= pms[ii]
pmra= -2.296+pmo
pmdec= -2.257+pmo*pmdecpar
        print("Working on %i: dist %.2f, pmra %.3f, pmdec %.3f"
              % (ii,dist,pmra,pmdec))
likes= analyze_one_model(options,cs,pot_params,dist,pmra,pmdec).T
# Write
for row in likes:
outwriter.writerow(row)
outfile.flush()
ii+= 1
outfile.close()
``` |
{
"source": "jobovy/shearing-sheet",
"score": 3
} |
#### File: shearing-sheet/shsh/integrate.py
```python
from jax import numpy as jnp
from jax.experimental.ode import odeint
def integrate(vxvv,t,Omega0,A):
"""Integrate an orbit in the shearing sheet
Parameters
----------
vxvv: array
Initial condition
t: array
Times over which to integrate, t[0] is the initial time
Omega0: float
Rotational frequency
A: float
Oort A
Returns
-------
array
Integrated orbit in the shearing sheet
"""
return odeint(_ode,vxvv,t,Omega0,A)
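# Usage sketch (illustrative values; the state vector is (x,y,vx,vy)):
#   ts= jnp.linspace(0.,100.,1001)
#   orbit= integrate(jnp.array([0.1,0.,0.,0.05]),ts,1.0,0.5)  # Omega0=1, A=0.5
#   # orbit.shape == (1001, 4)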
def _ode(y,t,Omega0,A):
    vx= y[:2]  # positions (x,y)
    vv= y[2:]  # velocities (vx,vy)
acc= jnp.array([vv[0],vv[1],
2.*Omega0*vv[1]+4.*A*Omega0*vx[0],
-2.*Omega0*vv[0]],dtype='float64')
return acc
``` |
{
"source": "jobovy/simple-blurring-churning",
"score": 2
} |
#### File: jobovy/simple-blurring-churning/simple_blurring_churning.py
```python
from functools import wraps
import numpy
from scipy import integrate
from galpy.orbit import Orbit
from galpy.df import dehnendf
from skewnormal import skewnormal
_R0= 8. #kpc
_V0= 220. #kms
_LINEARENRICHMENT= False
_TAUEQ= 2.
_ZINIT= 0.12
# defaults
_SKEWM_DEFAULT= 0.4
_SKEWS_DEFAULT= 0.1
_SKEWA_DEFAULT= -4.
_DFEHDR_DEFAULT= -0.1
def scalarDecorator(func):
"""Decorator to return scalar outputs"""
@wraps(func)
def scalar_wrapper(*args,**kwargs):
if numpy.array(args[0]).shape == ():
scalarOut= True
newargs= ()
for ii in range(len(args)):
if ii == 0:
newargs= newargs+(numpy.array([args[ii]]),)
else:
newargs= newargs+(args[ii],)
args= newargs
else:
scalarOut= False
result= func(*args,**kwargs)
if scalarOut:
return result[0]
else:
return result
return scalar_wrapper
#
# PURE DYNAMICS
#
# Blurring p(Rg|R)
@scalarDecorator
def blurring_pRgR(Rg,R,sr=31.4,hr=3.,hs=267.):
"""
NAME:
blurring_pRgR
PURPOSE:
The distribution of guiding-center radii at a given R from blurring
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= df(Orbit([R/8.,0.,Rg[ii]/R]))
return out
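# Usage sketch (illustrative; evaluates the blurring kernel at R= 8 kpc):
#   Rgs= numpy.linspace(4.,12.,81)
#   pRg= blurring_pRgR(Rgs,8.)   # relative p(Rg|R); normalization not enforced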
# Churning p(final Rg | initial Rg, tau)
@scalarDecorator
def churning_pRgfRgi(Rgf,Rgi,tau,fmig=1.):
"""
NAME:
churning_pRgfRgi
PURPOSE:
The distribution of final guiding-center radii from churning
INPUT:
Rgf - Guiding center radius (/kpc), can be array
Rgi - Initial guiding-center radius (/kpc)
tau - time (/Gyr)
fmig= (1.) efficiency of migration relative to fiducial model
OUTPUT:
p(Rgf|Rgi)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
sig= (0.01+0.2*fmig*tau*Rgi*numpy.exp(-(Rgi-8.)**2./16.))
return 1./numpy.sqrt(2.*numpy.pi)\
*numpy.exp(-(Rgi-Rgf)**2./2./sig)
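# Note: the kernel above is a Gaussian in (Rgf-Rgi) whose spread grows with
# tau and fmig and is largest for initial radii near Rgi ~ 8 kpc.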
# Churning p(Rg|R,tau)
@scalarDecorator
def churning_pRgRtau(Rg,R,tau,fmig=1.,sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pRgRtau
PURPOSE:
The distribution of guiding-center radii at a given radius and time from churning
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
tau - time (/Gyr)
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R,tau)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= integrate.fixed_quad(lambda x: df(Orbit([R/8.,0.,x/R]))\
*churning_pRgfRgi(x,Rg[ii],tau,
fmig=fmig),
numpy.amax([Rg[ii]-4.,0.]),
Rg[ii]+6.,n=40)[0]
return out
# Churning p(Rg|R)
@scalarDecorator
def churning_pRgR(Rg,R,fmig=1.,sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pRgR
PURPOSE:
The distribution of guiding-center radii at a given radius from churning (assume constant SFH)
INPUT:
Rg - Guiding center radius (/kpc), can be array
R - Given radius (/kpc)
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Rg|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
# Setup the DF
df= dehnendf(beta=0.,profileParams=(hr/_R0,hs/_R0,sr/_V0))
out= numpy.empty(len(Rg))
for ii in range(len(Rg)):
out[ii]= integrate.quadrature(\
lambda tau: integrate.fixed_quad(lambda x: \
df(Orbit([R/8.,0.,x/R]))
*churning_pRgfRgi(x,Rg[ii],
tau,fmig=fmig),
numpy.amax([Rg[ii]-4.,0.]),
Rg[ii]+6.,n=40)[0],
0.,10.,tol=10.**-4.,rtol=10**-3.,vec_func=False)[0]
return out
#
# MDFs
#
# Initial MDF at different radii
def pFehRg(Feh,Rg,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
pFehRg
PURPOSE:
The initial MDF at a given radius Rg
INPUT:
Feh - Metallicity
Rg - Radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
p(Feh|Rg) at the initial time
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
return skewnormal(Feh,m=skewm+dFehdR*(Rg-4.),s=skews,a=skewa)\
*0.5*(1.+numpy.tanh((Feh-numpy.log10(_ZINIT))/0.2))
def pAgeRg(age,Rg,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
pAgeRg
PURPOSE:
The initial age DF at a given radius Rg
INPUT:
age - age (/Gyr)
Rg - Radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
p(age|Rg) at the initial time
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
ageFeh= fehAgeRg(age,Rg,skewm=skewm,dFehdR=dFehdR)
return pFehRg(ageFeh,Rg,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)\
/numpy.fabs(_dagedFehRg(ageFeh,Rg,skewm=skewm,dFehdR=dFehdR))
# The relation between age and metallicity at a given radius
def fehAgeRg(age,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
fehAgeRg
PURPOSE:
The metallicity corresponding to a given age at radius Rg; assuming linear increase in exp(Feh) with time from Zinit Zsolar
INPUT:
age - age (/Gyr)
Rg - guiding-center radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
FeH(age,Rg)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return numpy.log10(_ZINIT+(10.-age)/10.*(10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return numpy.log10((eq-_ZINIT)*(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT)
def ageFehRg(feh,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
ageFehRg
PURPOSE:
The age corresponding to a given metallicity at radius Rg; assuming linear increase in exp(Feh) with time from _ZINIT Zsolar
INPUT:
feh - metallicity
Rg - guiding-center radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
age(FeH,Rg)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return 10.-10.*(10.**feh-_ZINIT)/((10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return 10.+numpy.log(1.-(10.**feh-_ZINIT)/(eq-_ZINIT))*_TAUEQ
def RgAgeFeh(age,feh,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
"""
NAME:
RgAgeFeh
PURPOSE:
The guiding-center radius corresponding to a given metallicity and age; assuming linear increase in exp(Feh) with time from _ZINIT Zsolar
INPUT:
age - age (/Gyr)
feh - metallicity
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
OUTPUT:
Rg(age,FeH)
HISTORY:
2015-01-13 - Written - Bovy (IAS)
"""
if _LINEARENRICHMENT:
return (numpy.log10(10.*(10.**feh-_ZINIT)/(10.-age))-skewm-skews)/dFehdR+4.
else:
return (numpy.log10((10.**feh-_ZINIT)/(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT)-skews-skewm)/dFehdR+4.
# Also need derivatives for integrals and distribution
def _dfehdAgeRg(age,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
if _LINEARENRICHMENT:
return -1./10./numpy.log(10.)*(10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT)\
/(_ZINIT+(10.-age)/10.*(numpy.exp(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return -(eq-_ZINIT)*numpy.exp(-(10.-age)/_TAUEQ)/(((eq-_ZINIT)*(1.-numpy.exp(-(10.-age)/_TAUEQ))+_ZINIT))/numpy.log(10.)/_TAUEQ
def _dagedFehRg(feh,Rg,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
if _LINEARENRICHMENT:
return -10.*10.**feh*numpy.log(10.)\
/((10.**(skews+skewm+dFehdR*(Rg-4.))-_ZINIT))
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return -_TAUEQ*numpy.log(10.)*10.**feh/(eq-_ZINIT)/(1.-(10.**feh-_ZINIT)/(eq-_ZINIT))
def _dfehdRgAge(Rg,age,skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,dFehdR=_DFEHDR_DEFAULT):
feh= fehAgeRg(age,Rg,skewm=skewm,skews=skews,dFehdR=dFehdR)
if _LINEARENRICHMENT:
return (10.-age)/10.*10.**(skews+skewm+dFehdR*(Rg-4.))*dFehdR/10.**feh
else:
eq= 10.**(skews+skewm+dFehdR*(Rg-4.))
return (1.-numpy.exp(-(10.-age)/_TAUEQ))*eq*dFehdR/10.**feh
def test_dfehdAgeRg():
ages= numpy.tile(numpy.linspace(1.,10.,101),(101,1))
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
dx= 10.**-8.
dage= _dfehdAgeRg(ages,Rs)
dage_num= (fehAgeRg(ages+dx,Rs)-fehAgeRg(ages,Rs))/dx
assert numpy.all(numpy.fabs(dage-dage_num) < 10.**-4.), 'dfehdAgeRg implemented incorrectly'
return None
def test_dagedFgeRg():
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
fehs= numpy.tile(numpy.linspace(-1.5,0.7,101),(101,1))
Rs[fehs > fehAgeRg(0.,Rs)-0.03]= numpy.nan
dx= 10.**-8.
dfeh= _dagedFehRg(fehs,Rs)
dfeh_num= (ageFehRg(fehs+dx,Rs)-ageFehRg(fehs,Rs))/dx
assert numpy.all((numpy.fabs(dfeh-dfeh_num) < 10.**-4.)+numpy.isnan(dfeh)), 'dagedFehRg implemented incorrectly'
return None
def test_dfehdRgAge():
Rs= numpy.tile(numpy.linspace(2.,16.,101),(101,1)).T
ages= numpy.tile(numpy.linspace(1.,9.9,101),(101,1))
dx= 10.**-8.
dfeh= _dfehdRgAge(Rs,ages)
dfeh_num= (fehAgeRg(ages,Rs+dx)-fehAgeRg(ages,Rs))/dx
assert numpy.all((numpy.fabs(dfeh-dfeh_num) < 10.**-6.)+numpy.isnan(dfeh)), 'dfehdRgAge implemented incorrectly'
return None
# Blurring MDF
@scalarDecorator
def blurring_pFehR(feh,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,
sr=31.4,hr=3.,hs=267.):
"""
NAME:
blurring_pFehR
PURPOSE:
The distribution of metallicities at a given R due to blurring
INPUT:
feh - metallicity
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
for ii in range(len(feh)):
out[ii]= integrate.quadrature(lambda x: pFehRg(feh[ii],x,
skewm=skewm,skews=skews,
skewa=skewa,
dFehdR=dFehdR)\
*blurring_pRgR(x,R,sr=sr,
hr=hr,hs=hs),
numpy.amax([0.,R-4.]),R+4.,
tol=10.**-4.,rtol=10.**-3.,
vec_func=False)[0]
return out
# Churning age distribution
@scalarDecorator
def churning_pAgeR(age,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.):
"""
NAME:
churning_pAgeR
PURPOSE:
The distribution of ages at a given R due to churning
INPUT:
age - age (/Gyr)
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(age|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(age)
for ii in range(len(age)):
out[ii]= integrate.quadrature(\
lambda x: pFehRg(fehAgeRg(age[ii],x,skewm=skewm,skews=skews,
dFehdR=dFehdR),x,
skewm=skewm,skews=skews,
skewa=skewa,
dFehdR=dFehdR)\
*churning_pRgR(x,R,fmig=fmig,sr=sr,
hr=hr,hs=hs)\
/numpy.fabs(_dagedFehRg(fehAgeRg(age[ii],x,skewm=skewm,skews=skews,dFehdR=dFehdR),x)),
numpy.amax([0.,R-4.]),R+6.,
tol=10.**-4.,rtol=10.**-3.,
vec_func=False)[0]
return out
# Churning metallicity distribution
@scalarDecorator
def churning_pFehR(feh,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,
skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.,
useInitialAgeDF=True):
"""
NAME:
churning_pFehR
PURPOSE:
The distribution of metallicities at a given R due to churning
INPUT:
feh - metallicity
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh|R)
HISTORY:
2015-01-12 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
for ii in range(len(feh)):
# shortcut for Age DF
if useInitialAgeDF:
ageDF= lambda a: pAgeRg(a,R,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)
else:
ageDF= lambda a: churning_pAgeR(a,R,skewm=skewm,skews=skews,
skewa=skewa,dFehdR=dFehdR,fmig=fmig,
sr=sr,hr=hr,hs=hs)
# Short age function, so we don't have to repeat this
ageFunc= lambda r: ageFehRg(feh[ii],r,skewm=skewm,skews=skews,
dFehdR=dFehdR)
# Integrate
def intFunc(x):
tage= ageFunc(x)
if tage <= 0. or tage > 10. or numpy.isnan(tage):
return 0.
return ageDF(ageFunc(x))\
*churning_pRgRtau(x,R,tage,
fmig=fmig,sr=sr,
hr=hr,hs=hs)\
/numpy.fabs(_dfehdAgeRg(tage,x))
out[ii]= integrate.quad(intFunc,
numpy.amax([0.,R-12.]),(feh[ii]-skewm-skews)/dFehdR+4.)[0]
return out
# Churning metallicity distribution
@scalarDecorator
def churning_pFehAgeR(feh,age,R,
skewm=_SKEWM_DEFAULT,skews=_SKEWS_DEFAULT,
skewa=_SKEWA_DEFAULT,
dFehdR=_DFEHDR_DEFAULT,fmig=1.,
sr=31.4,hr=3.,hs=267.,
useInitialAgeDF=True):
"""
NAME:
churning_pFehAgeR
PURPOSE:
The distribution of metallicities and ages at a given R due to churning
INPUT:
feh - metallicity (can be array)
age - age (/Gyr)
R - radius (/kpc)
       skewm= (0.4) mean of the initial MDF at 4 kpc
       skews= (0.1) standard dev. of the initial MDF
       skewa= (-4.) skewness parameter of the initial MDF
       dFehdR= (-0.1) initial metallicity gradient
fmig= (1.) efficiency of migration relative to fiducial model
sr= (31.4 km/s) velocity dispersion at R0
hr= (3 kpc) scale length
hs= (267 kpc) dispersion scale length
OUTPUT:
p(Feh,age|R)
HISTORY:
2015-01-13 - Written - Bovy (IAS)
"""
out= numpy.empty_like(feh)
# p(age|R)
if useInitialAgeDF:
ageP= pAgeRg(age,R,skewm=skewm,skews=skews,skewa=skewa,
dFehdR=dFehdR)
else:
ageP= churning_pAgeR(age,R,skewm=skewm,skews=skews,
skewa=skewa,dFehdR=dFehdR,fmig=fmig,
sr=sr,hr=hr,hs=hs)
for ii in range(len(feh)):
trg= RgAgeFeh(age,feh[ii],
skewm=skewm,skews=skews,dFehdR=dFehdR)
if trg <= 0. or numpy.isnan(trg) or numpy.isinf(trg) \
or feh[ii] > (skews+skewm+dFehdR*(trg-4.)):
out[ii]= 0.
continue
out[ii]= \
churning_pRgRtau(trg,R,age,fmig=fmig,sr=sr,hr=hr,hs=hs)\
*ageP/_dfehdRgAge(trg,age,skewm=skewm,skews=skews,dFehdR=dFehdR)
return out
def skewness(x,mdf):
m= numpy.nansum(x*mdf)/numpy.nansum(mdf)
return numpy.nansum((x-m)**3.*mdf)/numpy.nansum(mdf)\
/(numpy.nansum((x-m)**2*mdf)/numpy.nansum(mdf))**1.5
```
#### File: jobovy/simple-blurring-churning/skewnormal.py
```python
import numpy
from scipy.stats import norm
def skewnormal(x,m=0,s=1,a=-4.):
"""Specify in terms of the actual mean and sqrt{variance}"""
# Calculate the actual parameters
d= a/numpy.sqrt(1.+a**2.)
s/= numpy.sqrt(1.-2*d**2./numpy.pi)
m-= s*d*numpy.sqrt(2./numpy.pi)
# Evaluate
t= (x-m)/s
return 2.*norm.pdf(t)*norm.cdf(a*t)/s
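# Usage sketch (values match the _SKEW*_DEFAULT settings in
# simple_blurring_churning.py):
#   fehs= numpy.linspace(-1.,1.,201)
#   pdf= skewnormal(fehs,m=0.4,s=0.1,a=-4.)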
``` |
{
"source": "jobovy/sphinx-astrorefs",
"score": 2
} |
#### File: sphinx-astrorefs/sphinx_astrorefs/pybtex_astro.py
```python
import re
from collections import Counter
import string
from pybtex.style.formatting.unsrt import Style as UnsrtStyle
from pybtex.style.formatting import toplevel
from pybtex.style.labels import BaseLabelStyle
from pybtex.style.sorting.author_year_title \
import SortingStyle as AuthorSortingStyle
from pybtex.richtext import Symbol, Text
from pybtex.plugin import register_plugin
from pybtex.style.template import (
field, first_of, href, join, names, optional, optional_field, sentence,
tag, together, words, node, FieldIsMissing
)
import latexcodec
from sphinx.util import logging
logger= logging.getLogger(__name__)
def decode_specialchars(input):
return input.replace('{', '').replace('}', '').encode().decode('latex')
# labels in astro format: author year, author & author year for two authors
# author et al. year for more than two
class AstroLabelStyle(BaseLabelStyle):
def format_labels(self, sorted_entries):
all_labels= []
for entry in sorted_entries:
if len(entry.persons['author']) == 1:
out= '{} {}'.format(entry.persons['author'][0].last_names[0],
entry.fields['year'])
elif len(entry.persons['author']) == 2:
out= '{} & {} {}'.format(entry.persons['author'][0]\
.last_names[0],
entry.persons['author'][1]\
.last_names[0],
entry.fields['year'])
else:
out= '{} et al. {}'.format(entry.persons['author'][0]\
.last_names[0],
entry.fields['year'])
all_labels.append(decode_specialchars(out))
# Deal with duplicates, assuming at most 26 duplicates
dups= [item for item, count in Counter(all_labels).items()
if count > 1]
for dup in dups:
idxs= [ii for ii,x in enumerate(all_labels) if x == dup]
for idx,lett in zip(idxs,string.ascii_lowercase):
last_digit= re.match('.+([0-9])[^0-9]*$',all_labels[idx])
all_labels[idx]= all_labels[idx][:last_digit.start(1)+1]\
+lett+all_labels[idx][last_digit.start(1)+1:]
sorted_entries[idx].fields['year']= \
sorted_entries[idx].fields['year']+lett
# Yield output
for entry, label in zip(sorted_entries,all_labels):
yield label
def dashify(text):
dash_re = re.compile(r'-+')
return Text(Symbol('ndash')).join(text.split(dash_re))
def format_first_and_last_name(person):
if len(person.first_names) > 0:
return '{} {}.'.format(decode_specialchars(person.last_names[0]),
str(person.first_names[0])[0])
else: # collaboration or similar
return '{}'.format(decode_specialchars(person.last_names[0]))
@node
def astro_names(children, context, role, **kwargs):
"""Return formatted names."""
assert not children
try:
persons= context['entry'].persons[role]
except KeyError:
raise FieldIsMissing(role, context['entry'])
if len(persons) > 5:
out= ''
for ii in range(5):
out+= '{}, '.format(format_first_and_last_name(persons[ii]))
return '{}et al.'.format(out)
elif len(persons) > 2:
out= ''
for ii in range(len(persons)-1):
out+= '{}, '.format(format_first_and_last_name(persons[ii]))
return '{}& {}'.format(out,
format_first_and_last_name(persons[-1]))
elif len(persons) == 2:
return '{} & {}'.format(format_first_and_last_name(persons[0]),
format_first_and_last_name(persons[1]))
else:
return format_first_and_last_name(persons[0])
class AstroStyle(UnsrtStyle):
default_label_style = AstroLabelStyle
default_sorting_style= AuthorSortingStyle
def format_names(self, role, as_sentence=True):
formatted_names= astro_names(role,sep=', ',
sep2 = ' & ',last_sep=', & ')
if as_sentence:
return sentence [formatted_names]
else:
return formatted_names
def format_author(self,e,as_sentence=True):
authors= self.format_names('author', as_sentence=False)
return sentence[authors]
def format_journal(self,e):
if 'doi' not in e.fields:
return field('journal')
else:
return href [
join [
'https://doi.org/',
field('doi', raw=True)
],
field('journal')
]
def format_volume(self,e):
if 'adsurl' not in e.fields:
return field('volume')
else:
return href [ field('adsurl',raw=True),
field('volume')]
def format_pages(self,e):
if 'eprint' not in e.fields:
return field('pages',apply_func=dashify)
else:
return href [
join [
'https://arxiv.org/abs/',
field('eprint', raw=True)
],
field('pages',apply_func=dashify)
]
def get_article_template(self, e):
if 'volume' not in e.fields:
journal_and_volume = tag('em') [self.format_journal(e)]
else:
journal_and_volume = join [
tag('em') [self.format_journal(e)],' ',
tag('strong') [self.format_volume(e)]
]
template = toplevel [
self.format_author(e),
self.format_title(e, 'title'),
sentence [
journal_and_volume,
join [
optional [self.format_pages(e)],
' (',field('year'),')']
]
]
return template
def get_book_template(self, e):
template = toplevel [
self.format_author_or_editor(e),
self.format_btitle(e, 'title'),
self.format_volume_and_series(e),
sentence [
join [
field('publisher'),
optional [', ',field('address')],
' (',field('year'),')']
],
optional[ sentence [ self.format_isbn(e) ] ],
self.format_web_refs(e),
]
return template
def register():
logger.info("Registering astro-style pybtex formatting...")
register_plugin('pybtex.style.formatting', 'astrostyle', AstroStyle)
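# Note (assumption, not shown in this file): register() is expected to be
# called once when the Sphinx extension is set up, after which the style can
# be selected as 'astrostyle' in the bibliography configuration.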
```
#### File: sphinx-astrorefs/sphinx_astrorefs/resolve_aas.py
```python
import os.path
from sphinx.errors import ExtensionError
aas_macros_dict= {
'\\apjsupp' : 'Astrophys. J. Supp.',
'\\apjs' : 'Astrophys. J. Supp.',
    '\\apjlett' : 'Astrophys. J. Lett.',
    '\\apjl'    : 'Astrophys. J. Lett.',
'\\apj' : 'Astrophys. J.',
'\\aj' : 'Astron. J.',
'\\mnras' : 'Mon. Not. Roy. Astron. Soc.',
'\\baas' : 'Bull. AAS',
'\\bain' : 'Bull. Astron. Inst. Netherlands',
'\\aapr' : 'Astron. & Astrophys. Rev.',
'\\aaps' : 'Astron. & Astrophys. Supp.',
'\\astap' : 'Astron. & Astrophys.',
'\\aap' : 'Astron. & Astrophys.',
'\\araa' : 'Ann. Rev. Astron. Astrophys.',
'\\actaa' : 'Acta Astronomica',
'\\apss' : 'Astrophys. & Space Sci.',
'\\jcap' : 'J. Cosmo & Astropart. Phys.',
'\\nat' : 'Nature',
'\\nar' : 'New Astron. Rev.',
'\\na' : 'New Astron.',
'\\pra' : 'Phys. Rev. A',
'\\prb' : 'Phys. Rev. B',
'\\prc' : 'Phys. Rev. C',
'\\prd' : 'Phys. Rev. D',
'\\pre' : 'Phys. Rev. E',
'\\prl' : 'Phys. Rev. Lett.',
'\\pasa' : 'Pub. Astron. Soc. Aus.',
'\\pasp' : 'Pub. Astron. Soc. Pac.',
'\\pasj' : 'Pub. Astron. Soc. Japan',
'\\rmxaa' : 'Rev. Mex. Astron. & Astrofys.',
'\\ssr' : 'Space Sci. Rev.',
'\\applopt' : 'Appl. Opt.',
'\\ao' : 'Appl. Opt.',
'\\azh' : 'Astron. Zhu.',
'\\bac' : 'Bull. Astron. Czech.',
'\\caa' : 'Chin. Astron. Astrophys.',
'\\cjaa' : 'Chin. J. Astron. Astrophys.',
'\\icarus' : 'Icarus',
'\\jrasc' : 'J. RAS Can.',
'\\memras' : 'Mem. RAS',
'\\qjras' : 'Quat. J. RAS',
'\\skytel' : 'Sky & Telescope',
'\\solphys' : 'Sol. Phys.',
'\\sovast' : 'Sov. Astron.',
'\\zap' : 'ZeitSch. Astrophys.',
'\\iaucirc' : 'IAU Circs.',
'\\aplett' : 'Astrophys. Lett.',
'\\apspr' : 'Astrophys. Space Phys. Res.',
'\\fcp' : 'Fund. Cosm. Phys.',
'\\gca' : 'Geochim. Cosmochim. Acta',
'\\grl' : 'Geophys. Res. Lett',
'\\jcp' : 'J. Chem. Phys.',
'\\jgr' : 'J. Geophys. Res.',
'\\jqsrt' : 'J. Quant. Spec. Rad. Trans.',
'\\memsai' : 'Mem. Soc. Astron. Ital.',
'\\nphysa' : 'Nucl. Phys. A',
'\\physrep' : 'Phys. Rep.',
'\\physscr' : 'Phys. Scrip.',
'\\planss' : 'Plan. Space. Sci.',
'\\procspie': 'Proc. SPIE'
}
def resolve(app):
if not app.config.astrorefs_resolve_aas_macros:
return
if app.config.astrorefs_resolve_aas_macros_infile is None \
or app.config.astrorefs_resolve_aas_macros_outfile is None:
raise ExtensionError('sphinx-astrorefs: when resolving AAS macros, need to give original and target bib file name as "astrorefs_resolve_aas_macros_infile" and "astrorefs_resolve_aas_macros_outfile"')
with open(os.path.join(app.env.srcdir,
app.config.astrorefs_resolve_aas_macros_infile),'r') \
as infile:
with open(os.path.join(app.env.srcdir,
app.config.astrorefs_resolve_aas_macros_outfile),'w') \
as outfile:
for line in infile:
for key in aas_macros_dict.keys():
line= line.replace(key,aas_macros_dict[key])
outfile.write(line)
# Re-do this initialization to make sure the bibtex file is found
if hasattr(app.config,'bibtex_bibfiles'):
app.env.get_domain('cite').__init__(app.env)
``` |
{
"source": "jobovy/streamgap-pepper",
"score": 2
} |
#### File: jobovy/streamgap-pepper/gd1_util.py
```python
from galpy.df import streamdf, streamgapdf
from streampepperdf import streampepperdf
from galpy.orbit import Orbit
from galpy.potential import LogarithmicHaloPotential
from galpy.actionAngle import actionAngleIsochroneApprox
from galpy.util import bovy_conversion #for unit conversions
R0, V0= 8., 220.
def setup_gd1model(leading=True,
timpact=None,
hernquist=True,
age=9.,
singleImpact=False,
length_factor=1.,
**kwargs):
lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
aAI= actionAngleIsochroneApprox(pot=lp,b=0.8)
obs= Orbit([1.56148083,0.35081535,-1.15481504,0.88719443,
-0.47713334,0.12019596])
sigv= 0.365/2.*(9./age) #km/s, /2 bc tdis x2, adjust for diff. age
if timpact is None:
sdf= streamdf(sigv/220.,progenitor=obs,pot=lp,aA=aAI,leading=leading,
nTrackChunks=11,
tdisrupt=age/bovy_conversion.time_in_Gyr(V0,R0),
Vnorm=V0,Rnorm=R0)
elif singleImpact:
sdf= streamgapdf(sigv/220.,progenitor=obs,pot=lp,aA=aAI,
leading=leading,
nTrackChunks=11,
tdisrupt=age/bovy_conversion.time_in_Gyr(V0,R0),
Vnorm=V0,Rnorm=R0,
timpact=timpact,
spline_order=3,
hernquist=hernquist,**kwargs)
else:
sdf= streampepperdf(sigv/220.,progenitor=obs,pot=lp,aA=aAI,
leading=leading,
nTrackChunks=101,
tdisrupt=age/bovy_conversion.time_in_Gyr(V0,R0),
Vnorm=V0,Rnorm=R0,
timpact=timpact,
spline_order=1,
hernquist=hernquist,
length_factor=length_factor)
sdf.turn_physical_off()
return sdf
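# Usage sketch (illustrative; impact times must be in internal galpy units):
#   sdf_smooth= setup_gd1model()                    # smooth leading-arm model
#   timpacts= [t/bovy_conversion.time_in_Gyr(V0,R0) for t in (2.,4.,6.,8.)]
#   sdf_pepper= setup_gd1model(timpact=timpacts)    # 'peppered' model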
```
#### File: jobovy/streamgap-pepper/make_pepper_movie.py
```python
import os, os.path
import copy
import pickle
import numpy
import tqdm
import subprocess
import statsmodels.api as sm
import matplotlib
matplotlib.use('Agg')
from galpy.util import bovy_plot, bovy_conversion, multi
from matplotlib import pyplot
import seaborn as sns
import simulate_streampepper
from streampepperdf import streampepperdf
from optparse import OptionParser
from gd1_util import R0, V0
def get_options():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
# make frames or combine into movie?
parser.add_option("--movie",action="store_true",
dest="movie",default=False,
help="If set, combine frames into a movie")
# Output
parser.add_option("-o",dest='outputfilename',default=None,
help="Name of the file that will hold the movie")
parser.add_option("--base",dest='basefilename',default=None,
help="Basename of the frame files (incl. directory)")
parser.add_option("--skip",action="store_true",
dest="skip",default=False,
help="If set, skip existing frames")
# Simulation options
parser.add_option("-n",dest='nparticles',default=10000,type='int',
help="Number of particles to sample for each tail")
parser.add_option("--nsnap",dest='nsnap',default=1024,type='int',
help="Number of snapshots to produce (must match a gd1pappernsnap.pkl file and gd1peppernsnap_trailing file)")
# Single impact?
parser.add_option("--single",action="store_true",
dest="single",default=False,
help="If set, perform a single, large impact")
parser.add_option("--singletimpact",
dest='singletimpact',default=4.,type='float',
help="Time of impact (in Gyr) for the single impact case")
parser.add_option("--singlemimpact",
dest='singlemimpact',default=10.,type='float',
help="Mass of impact (in 10^7 msun) for the single impact case")
# Full range
parser.add_option("--pepper",action="store_true",
dest="pepper",default=False,
help="If set, sample from all possible impacts")
parser.add_option("--timescdm",dest='timescdm',default=1.,type='float',
help="Use a rate that is timescdm times the CDM prediction")
parser.add_option("--Mmin",dest='Mmin',default=5.,type='float',
help="log10() of the minimum mass in Msun")
parser.add_option("--Mmax",dest='Mmax',default=9.,type='float',
help="log10() of the max mass in Msun")
# Ploting options
parser.add_option("--noaxes",action="store_true",
dest="noaxes",default=False,
help="If set, don't plot axes")
parser.add_option("--lowess",action="store_true",
dest="lowess",default=False,
help="If set, add a trendline to each tail")
return parser
def create_frames(options,args):
# First reload the model
with open('gd1pepper%isampling.pkl' % options.nsnap,'rb') as savefile:
sdf_pepper_leading= pickle.load(savefile)
with open('gd1pepper%isampling_trailing.pkl' % options.nsnap,'rb') as savefile:
sdf_pepper_trailing= pickle.load(savefile)
# Output times
timpacts= sdf_pepper_leading._uniq_timpact
# Sample unperturbed aAt
numpy.random.seed(1)
Oml,anglel,dtl= super(streampepperdf,sdf_pepper_leading)._sample_aAt(\
options.nparticles)
Omt,anglet,dtt= super(streampepperdf,sdf_pepper_trailing)._sample_aAt(\
options.nparticles)
# Setup progenitor
prog= sdf_pepper_leading._progenitor().flip()
prog.integrate(numpy.linspace(0.,9./bovy_conversion.time_in_Gyr(V0,R0),
10001),sdf_pepper_leading._pot)
prog.flip()
# Setup impacts
if options.single:
# Hit the leading arm and the trailing arm 1 Gyr later
m= options.singlemimpact/bovy_conversion.mass_in_1010msol(V0,R0)/1000.
t= timpacts[\
numpy.argmin(\
numpy.fabs(\
numpy.array(timpacts)\
-options.singletimpact\
/bovy_conversion.time_in_Gyr(V0,R0)))]
sdf_pepper_leading.set_impacts(\
impactb=[0.5*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
subhalovel=numpy.array([[-25.,155.,30.]])/V0,
impact_angle=[0.2],
timpact=[t],
GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
# Trailing
m= options.singlemimpact/bovy_conversion.mass_in_1010msol(V0,R0)/1000.
t= timpacts[\
numpy.argmin(\
numpy.fabs(\
numpy.array(timpacts)\
-(options.singletimpact+1.)\
/bovy_conversion.time_in_Gyr(V0,R0)))]
sdf_pepper_trailing.set_impacts(\
impactb=[1.*simulate_streampepper.rs(options.singlemimpact*10.**7.)],
subhalovel=numpy.array([[-25.,155.,30.]])/V0,
impact_angle=[-0.3],
timpact=[t],
GM=[m],rs=[simulate_streampepper.rs(options.singlemimpact*10.**7.)])
elif options.pepper:
# Sampling functions
massrange=[options.Mmin,options.Mmax]
plummer= False
Xrs= 5.
nsubhalo= simulate_streampepper.nsubhalo
rs= simulate_streampepper.rs
dNencdm= simulate_streampepper.dNencdm
sample_GM= lambda: (10.**((-0.5)*massrange[0])\
+(10.**((-0.5)*massrange[1])\
-10.**((-0.5)*massrange[0]))\
*numpy.random.uniform())**(1./(-0.5))\
/bovy_conversion.mass_in_msol(V0,R0)
rate_range= numpy.arange(massrange[0]+0.5,massrange[1]+0.5,1)
rate= numpy.sum([dNencdm(sdf_pepper_leading,10.**r,Xrs=Xrs,
plummer=plummer)
for r in rate_range])
rate= options.timescdm*rate
sample_rs= lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.,
plummer=plummer)
# Pepper both
sdf_pepper_leading.simulate(rate=rate,sample_GM=sample_GM,
sample_rs=sample_rs,Xrs=Xrs)
        print(numpy.amax(sdf_pepper_leading._GM)*bovy_conversion.mass_in_1010msol(V0,R0))
sdf_pepper_trailing.simulate(rate=rate,sample_GM=sample_GM,
sample_rs=sample_rs,Xrs=Xrs)
        print(numpy.amax(sdf_pepper_trailing._GM)*bovy_conversion.mass_in_1010msol(V0,R0))
else:
# Hit both with zero
sdf_pepper_leading.set_impacts(\
impactb=[0.],
subhalovel=numpy.array([[-25.,155.,30.]])/V0,
impact_angle=[0.2],
timpact=[timpacts[0]],
GM=[0.],rs=[1.])
sdf_pepper_trailing.set_impacts(\
impactb=[0.],
subhalovel=numpy.array([[-25.,155.,30.]])/V0,
impact_angle=[-0.2],
timpact=[timpacts[0]],
GM=[0.],rs=[1.])
# Now make all frames
dum= multi.parallel_map(
(lambda x: _plot_one_frame(x,options,prog,timpacts,
sdf_pepper_leading,sdf_pepper_trailing,
Oml,Omt,anglel,anglet,dtl,dtt)),
range(len(timpacts)),
numcores=numpy.amin([len(timpacts),30]))
return None
def _plot_one_frame(ii,options,prog,timpacts,
sdf_pepper_leading,sdf_pepper_trailing,
Oml,Omt,anglel,anglet,dtl,dtt):
bovy_plot.bovy_print(fig_height=3.,fig_width=7.)
xlabel= r'$X_{\mathrm{orb}}\,(\mathrm{kpc})$'
ylabel= r'$Y_{\mathrm{orb}}\,(\mathrm{kpc})$'
xrange= [-12.,12.]
yrange= [-1.75,.3]
TL= _projection_orbplane(prog)
if options.skip and os.path.exists(options.basefilename+'_%s.png'\
% str(ii).zfill(5)):
return None
timpact= timpacts[-ii-1]
overplot= False
for sdf_pepper,Om,angle,dt \
in zip([sdf_pepper_leading,sdf_pepper_trailing],
[Oml,Omt],[anglel,anglet],[dtl,dtt]):
tOm= copy.deepcopy(Om)
tangle= copy.deepcopy(angle)
tdt= copy.deepcopy(dt)
if timpact > sdf_pepper._timpact[-1]:
# No impact yet
tangle-= tOm*timpact
else:
tangle-= tOm*timpact
# Apply all kicks relevant for this impact
# (copied from streampepperdf)
dangle_at_impact= angle\
-numpy.tile(sdf_pepper._progenitor_angle.T,
(options.nparticles,1)).T\
-(tOm-numpy.tile(sdf_pepper._progenitor_Omega.T,
(options.nparticles,1)).T)\
*sdf_pepper._timpact[-1]
dangle_par_at_impact=\
numpy.dot(dangle_at_impact.T,
sdf_pepper._dsigomeanProgDirection)\
*sdf_pepper._sgapdfs[-1]._gap_sigMeanSign
dOpar= numpy.dot((tOm-numpy.tile(sdf_pepper._progenitor_Omega.T,
(options.nparticles,1)).T).T,
sdf_pepper._dsigomeanProgDirection)\
*sdf_pepper._sgapdfs[-1]._gap_sigMeanSign
relevant_timpact= sdf_pepper._timpact[\
sdf_pepper._timpact > timpact]
for kk,ti in enumerate(relevant_timpact[::-1]):
# Calculate and apply kicks (points not yet released have
# zero kick)
dOr= sdf_pepper._sgapdfs[-kk-1]._kick_interpdOr(dangle_par_at_impact)
dOp= sdf_pepper._sgapdfs[-kk-1]._kick_interpdOp(dangle_par_at_impact)
dOz= sdf_pepper._sgapdfs[-kk-1]._kick_interpdOz(dangle_par_at_impact)
tOm[0,:]+= dOr
tOm[1,:]+= dOp
tOm[2,:]+= dOz
if kk < len(relevant_timpact)-1:
run_to_timpact= relevant_timpact[::-1][kk+1]
else:
run_to_timpact= timpact
tangle[0,:]+=\
sdf_pepper._sgapdfs[-kk-1]._kick_interpdar(dangle_par_at_impact)\
+dOr*(ti-timpact)
tangle[1,:]+=\
sdf_pepper._sgapdfs[-kk-1]._kick_interpdap(dangle_par_at_impact)\
+dOp*(ti-timpact)
tangle[2,:]+=\
sdf_pepper._sgapdfs[-kk-1]._kick_interpdaz(dangle_par_at_impact)\
+dOz*(ti-timpact)
# Update parallel evolution
dOpar+=\
sdf_pepper._sgapdfs[-kk-1]._kick_interpdOpar(dangle_par_at_impact)
dangle_par_at_impact+= dOpar*(ti-run_to_timpact)
# Convert to RvR coordinates for this time
coorddf= copy.deepcopy(sdf_pepper._sgapdfs_coordtransform[timpact])
coorddf._interpolate_stream_track_kick_aA()
coorddf._interpolatedObsTrack= coorddf._kick_interpolatedObsTrack
coorddf._ObsTrack= coorddf._gap_ObsTrack
coorddf._interpolatedObsTrackXY= coorddf._kick_interpolatedObsTrackXY
coorddf._ObsTrackXY= coorddf._gap_ObsTrackXY
coorddf._allinvjacsTrack= coorddf._gap_allinvjacsTrack
coorddf._interpolatedObsTrackAA= coorddf._kick_interpolatedObsTrackAA
coorddf._ObsTrackAA= coorddf._gap_ObsTrackAA
coorddf._nTrackChunks= coorddf._nTrackChunksImpact
coorddf._thetasTrack= coorddf._gap_thetasTrack
coorddf._interpolatedThetasTrack= coorddf._kick_interpolatedThetasTrack
coorddf._progenitor_angle-= coorddf._progenitor_Omega*timpact
coorddf._progenitor_angle= coorddf._progenitor_angle % (2.*numpy.pi)
tangle= tangle % (2.*numpy.pi)
RvR= coorddf._approxaAInv(tOm[0,:],tOm[1,:],tOm[2,:],
tangle[0,:],tangle[1,:],tangle[2,:],
interp=True)
cindx= numpy.array([coorddf._find_closest_trackpointaA(tOm[0,ll],tOm[1,ll],tOm[2,ll],
tangle[0,ll],tangle[1,ll],tangle[2,ll],
interp=True)
for ll in range(len(tOm[0]))],dtype='int')
# Progenitor and its orbit at the current time
cprog= prog(timpact)
cprog.integrate(numpy.linspace(0.,3.,101),sdf_pepper._pot)
cprogf= cprog.flip()
cprogf.integrate(numpy.linspace(0.,3.,101),sdf_pepper._pot)
# compute the orbit and rotate everything such that the derivative
# of the orbit points along X
tvec= numpy.empty((3,2))
tvec[0,0]= cprog.x(numpy.linspace(0.,3.,101)[1])
tvec[1,0]= cprog.y(numpy.linspace(0.,3.,101)[1])
tvec[2,0]= cprog.z(numpy.linspace(0.,3.,101)[1])
tvec[0,1]= cprogf.x(numpy.linspace(0.,3.,101)[1])
tvec[1,1]= cprogf.y(numpy.linspace(0.,3.,101)[1])
tvec[2,1]= cprogf.z(numpy.linspace(0.,3.,101)[1])
tx= numpy.dot(TL,tvec)[0]
ty= numpy.dot(TL,tvec)[1]
dx= tx[1]-tx[0]
dy= ty[1]-ty[0]
mag= numpy.sqrt(dx**2.+dy**2.)
dx/= mag
dy/= mag
rot= numpy.array([[dx,dy],[-dy,dx]])
# Plot
indx= tdt > timpact
# Rotate to 'orbital plane'
tvec= numpy.empty((3,options.nparticles))
tvec[0,:]= RvR[0]*numpy.cos(RvR[5])
tvec[1,:]= RvR[0]*numpy.sin(RvR[5])
tvec[2,:]= RvR[3]
tx= numpy.dot(TL,tvec)[0]
ty= numpy.dot(TL,tvec)[1]
tpx= numpy.dot(TL,[cprog.x(),cprog.y(),cprog.z()])[0]
tpy= numpy.dot(TL,[cprog.x(),cprog.y(),cprog.z()])[1]
plotx= numpy.dot(rot,numpy.array([(tx[indx]-tpx)*R0,
(ty[indx]-tpy)*R0]))[0]
ploty= numpy.dot(rot,numpy.array([(tx[indx]-tpx)*R0,
(ty[indx]-tpy)*R0]))[1]
txrange=\
[xrange[0]\
*(9.-timpact*bovy_conversion.time_in_Gyr(V0,R0))/9.-1.,
xrange[1]\
*(9.-timpact*bovy_conversion.time_in_Gyr(V0,R0))/9.+1.]
bovy_plot.bovy_plot(plotx,
ploty,
'.',ms=2.,
color=sns.color_palette("colorblind")[3],
alpha=0.2/(options.nparticles/10000.),
xlabel=xlabel,
ylabel=ylabel,
xrange=txrange,
yrange=yrange,
zorder=4,
overplot=overplot)
# Add trendline
if options.lowess:
lowess = sm.nonparametric.lowess
z= lowess(ploty,plotx,frac=0.02/(options.nparticles/10000.))
bovy_plot.bovy_plot(z[::100*(options.nparticles//10000),0],
z[::100*(options.nparticles//10000),1],
color=sns.color_palette('colorblind')[2],
lw=1.5,zorder=2,overplot=True)
overplot= True
# Plot progenitor orbit
for tp in [cprog,cprogf]:
tvec= numpy.empty((3,101))
tvec[0]= tp.x(numpy.linspace(0.,3.,101))
tvec[1]= tp.y(numpy.linspace(0.,3.,101))
tvec[2]= tp.z(numpy.linspace(0.,3.,101))
tx= numpy.dot(TL,tvec)[0]
ty= numpy.dot(TL,tvec)[1]
plotx= numpy.dot(rot,numpy.array([(tx-tpx)*R0,
(ty-tpy)*R0]))[0]
ploty= numpy.dot(rot,numpy.array([(tx-tpx)*R0,
(ty-tpy)*R0]))[1]
bovy_plot.bovy_plot(plotx,ploty,
color=sns.color_palette('colorblind')[0],
lw=1.25,zorder=0,
overplot=True)
if options.noaxes:
pyplot.subplots_adjust(bottom=0.02,left=0.02,right=.98,top=.98)
else:
pyplot.subplots_adjust(bottom=0.175,left=0.11,right=0.965,top=0.95)
if options.noaxes:
pyplot.axis('off')
bovy_plot.bovy_end_print(options.basefilename+'_%s.png'\
% str(ii).zfill(5))
return None
def _projection_orbplane(prog):
L= prog.L()[0]
Lx= L[0]
Ly= L[1]
Lz= L[2]
L= numpy.sqrt(Lx**2.+Ly**2.+Lz**2.)
Lx/= L
Ly/= L
Lz/= L
Txz= numpy.zeros((3,3))
Tz= numpy.zeros((3,3))
Txz[0,0]= Lx/numpy.sqrt(Lx**2.+Ly**2.)
Txz[1,1]= Lx/numpy.sqrt(Lx**2.+Ly**2.)
Txz[1,0]= Ly/numpy.sqrt(Lx**2.+Ly**2.)
Txz[0,1]= -Ly/numpy.sqrt(Lx**2.+Ly**2.)
Txz[2,2]= 1.
Tz[0,0]= Lz
Tz[1,1]= 1.
Tz[2,2]= Lz
Tz[2,0]= -numpy.sqrt(Lx**2.+Ly**2.)
Tz[0,2]= numpy.sqrt(Lx**2.+Ly**2.)
TL= numpy.dot(Tz,Txz)
return TL
def create_movie(options,args):
framerate= 25
bitrate= 1000000
try:
subprocess.check_call(['ffmpeg',
'-i',
options.basefilename+'_%05d.png',
'-y',
'-framerate',str(framerate),
'-r',str(framerate),
'-b', str(bitrate),
options.outputfilename])
except subprocess.CalledProcessError:
        print("'ffmpeg' failed")
return None
if __name__ == '__main__':
parser= get_options()
options,args= parser.parse_args()
if not options.movie:
create_frames(options,args)
else:
create_movie(options,args)
``` |
{
"source": "jobovy/stream-stream",
"score": 2
} |
#### File: stream-stream/py/stream2_util.py
```python
import os, os.path
import csv
import numpy
import copy
from galpy.potential import LogarithmicHaloPotential
from galpy.util import bovy_coords, multi
from galpy.actionAngle import actionAngleIsochroneApprox
R0= 8.
V0= 220.
lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
def rectangular_to_cylindrical(xv):
R,phi,Z= bovy_coords.rect_to_cyl(xv[:,0],xv[:,1],xv[:,2])
vR,vT,vZ= bovy_coords.rect_to_cyl_vec(xv[:,3],xv[:,4],xv[:,5],
R,phi,Z,cyl=True)
out= numpy.empty_like(xv)
# Preferred galpy arrangement of cylindrical coordinates
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,3]= Z
out[:,4]= vZ
out[:,5]= phi
return out
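# Note: xv is expected to be an (N,6) array of rectangular phase-space
# coordinates (x,y,z,vx,vy,vz); the returned array is ordered
# (R,vR,vT,Z,vZ,phi) as galpy expects.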
def calc_aA_sim(RvR,filename,snap_gc):
# Calculate the action angle variables for a simulation and store
if not os.path.exists(filename):
aAI= actionAngleIsochroneApprox(pot=lp,b=0.8)
nbatch= 20
multiOut= multi.parallel_map(\
lambda x: aAI.actionsFreqsAngles(RvR[x*nbatch:(x+1)*nbatch,0]/R0,
RvR[x*nbatch:(x+1)*nbatch,1]/V0,
RvR[x*nbatch:(x+1)*nbatch,2]/V0,
RvR[x*nbatch:(x+1)*nbatch,3]/R0,
RvR[x*nbatch:(x+1)*nbatch,4]/V0,
RvR[x*nbatch:(x+1)*nbatch,5]),
range(len(snap_gc)//nbatch),
numcores=25)
acfs= numpy.reshape(numpy.swapaxes(numpy.array(multiOut),0,1),
(9,numpy.prod(numpy.array(multiOut).shape)//9))
# Write to file
csvfile= open(filename,'w')
writer= csv.writer(csvfile,delimiter=',')
for jj in range(len(acfs[0])):
writer.writerow([acfs[0][jj],acfs[1][jj],acfs[2][jj],
acfs[3][jj],acfs[4][jj],acfs[5][jj],
acfs[6][jj],acfs[7][jj],acfs[8][jj]])
csvfile.flush()
csvfile.close()
else:
acfs= numpy.loadtxt(filename,delimiter=',').T
return acfs
def calc_apar(acfs,angle=None,freq=False,debrisThreshold=6.):
# Calculate the parallel angle offset,
# of angle if set (otherwise of the entire simulation),
# angle is a frequency if freq
thetar= acfs[6]
thetap= acfs[7]
thetaz= acfs[8]
if not angle is None:
if not freq:
angle[0]= (numpy.pi+(angle[0]-numpy.median(thetar))) % (2.*numpy.pi)
angle[1]= (numpy.pi+(angle[1]-numpy.median(thetap))) % (2.*numpy.pi)
angle[2]= (numpy.pi+(angle[2]-numpy.median(thetaz))) % (2.*numpy.pi)
thetap= (numpy.pi+(thetap-numpy.median(thetap))) % (2.*numpy.pi)
debrisIndx= numpy.fabs(thetap-numpy.pi) > (debrisThreshold*numpy.median(numpy.fabs(thetap-numpy.median(thetap))))
if angle is None:
thetar= (numpy.pi+(thetar-numpy.median(thetar))) % (2.*numpy.pi)
thetaz= (numpy.pi+(thetaz-numpy.median(thetaz))) % (2.*numpy.pi)
#center around 0 (instead of pi)
thetar-= numpy.pi
thetap-= numpy.pi
thetaz-= numpy.pi
elif freq:
thetar= angle[0]
thetap= angle[1]
thetaz= angle[2]
else:
thetar= angle[0]-numpy.pi
thetap= angle[1]-numpy.pi
thetaz= angle[2]-numpy.pi
#Frequencies
Or= acfs[3]
Op= acfs[4]
Oz= acfs[5]
dOr= Or[debrisIndx]-numpy.median(Or)
dOp= Op[debrisIndx]-numpy.median(Op)
dOz= Oz[debrisIndx]-numpy.median(Oz)
dO= numpy.vstack((dOr,dOp,dOz))
dO4dir= copy.copy(dO)
dO4dir[:,dO4dir[0] < 0.]*= -1.
dOdir= numpy.median(dO4dir,axis=1)
dOdir/= numpy.sqrt(numpy.sum(dOdir**2.))
#parallel angle
dangle= numpy.vstack((thetar,thetap,thetaz))
return numpy.dot(dangle.T,dOdir)
def calc_apars_dm(thetar,thetap,thetaz,aa_dm,oparDir_dm,t=0.):
dangle= numpy.vstack((thetar-aa_dm[3]*t,
thetap-aa_dm[4]*t,
thetaz-aa_dm[5]*t))
return numpy.dot(dangle.T,oparDir_dm)
def calc_opars_dm(Or,Op,Oz,oparDir_dm):
dfreq= numpy.vstack((Or,Op,Oz))
return numpy.dot(dfreq.T,oparDir_dm)
``` |
{
"source": "jobpasin/Object-Detection-API",
"score": 2
} |
#### File: jobpasin/Object-Detection-API/detect_video.py
```python
import time
import json
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs
from centroidtracker import CentroidTracker
from performance import TimeMeasure
import threading
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
flags.DEFINE_string('classes', './data/labels/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './weights/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video/paris.mp4',
'path to video file or number for webcam)')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
flags.DEFINE_string('logs', './detections/report2.json', 'path to result logs')
def img_read_wrapper(vid, out_queue: Queue, out_queue2: Queue):
print("img_read_wrapper: {}".format(threading.current_thread()))
global stop_threads
count = 0
frame_count = 0
while True:
_, img = vid.read()
if img is None or stop_threads:
logging.warning("Empty Frame:" + str(frame_count))
time.sleep(0.1)
count += 1
if count < 3:
continue
else:
print("Stopeed")
out_queue.put(None)
out_queue2.put(None)
break
else:
frame_count += 1
img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_in = tf.expand_dims(img_in, 0)
img_in = transform_images(img_in, FLAGS.size)
out_queue.put(img_in)
out_queue2.put(img)
def predict_wrapper(yolo, in_queue: Queue, out_queue: Queue):
print("prediction_wrapper: {}".format(threading.current_thread()))
global stop_threads
fps = 0.0
while True:
img_in = in_queue.get()
if img_in is None or stop_threads:
out_queue.put(None)
break
t1 = time.time()
with TimeMeasure('Prediction'):
boxes, scores, classes, nums = yolo.predict(img_in)
fps = (fps + (1. / (time.time() - t1))) / 2
output = {'boxes': boxes, 'scores': scores, 'classes': classes, 'nums': nums, 'fps': fps}
out_queue.put(output)
def display_wrapper(out, FLAGS, in_queue: Queue, in2_queue: Queue):
print("display_wrapper: {}".format(threading.current_thread()))
global stop_threads
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
data_log = {}
frame_count = 0
ct = CentroidTracker()
while True:
data = in_queue.get()
img = in2_queue.get()
if data is None or img is None:
break
boxes, scores, classes, nums, fps = data['boxes'], data['scores'], data['classes'], data['nums'], data['fps']
with TimeMeasure('Display frame:' + str(frame_count)):
img, rects, log = draw_outputs(img, (boxes, scores, classes, nums), class_names)
img = cv2.putText(img, "FPS: {:.2f}".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
objects = ct.update(rects)
if FLAGS.output:
out.write(img)
data_log['frame{}'.format(str(frame_count))] = log
frame_count += 1
cv2.imshow('output', img)
if cv2.waitKey(1) == ord('q'):
stop_threads = True
break
with open(FLAGS.logs, 'w') as f:
json.dump(data_log, f)
cv2.destroyAllWindows()
processed_img_queue = Queue()
raw_img_queue = Queue()
yolo_result_queue = Queue()
stop_threads = False
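# Pipeline note: img_read_wrapper pushes preprocessed frames into processed_img_queue and
# raw frames into raw_img_queue; predict_wrapper turns processed frames into detections on
# yolo_result_queue; display_wrapper (main thread) pairs detections with raw frames, draws
# them, and writes the JSON log. A None item on a queue is the downstream shutdown signal.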
def main(_argv):
print("Start")
start_time = time.time()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights)
logging.info('weights loaded')
logging.info('classes loaded')
times = []
try:
vid = cv2.VideoCapture(int(FLAGS.video))
except:
vid = cv2.VideoCapture(FLAGS.video)
out = None
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
    # two workers so that frame reading and YOLO inference run concurrently
    with ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(img_read_wrapper, vid, processed_img_queue, raw_img_queue)
executor.submit(predict_wrapper, yolo, processed_img_queue, yolo_result_queue)
display_wrapper(out, FLAGS, yolo_result_queue, raw_img_queue)
# read_thread = threading.Thread(target=img_read_wrapper, args=(vid, processed_img_queue, raw_img_queue))
# predict_thread = threading.Thread(target=predict_wrapper, args=(yolo, processed_img_queue, yolo_result_queue))
# display_thread = threading.Thread(target=display_wrapper, args=(out, FLAGS, yolo_result_queue, raw_img_queue))
# threads = [read_thread, predict_thread, display_thread]
# for t in threads:
# t.start()
# for t in threads:
# t.join()
print("FInish", time.time() - start_time)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
``` |
{
"source": "jobpasin/tensorflow-yolov4",
"score": 2
} |
#### File: jobpasin/tensorflow-yolov4/detect_wrapper.py
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import sys
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.python.saved_model import tag_constants
import tensorflow_yolov4.core.utils as utils
from absl import logging
import time
from queue import Queue, Empty, Full
from threading import Event
from typing import Union, List
from core.utils import TimeTracker
from tensorflow_yolov4.core.yolov4 import filter_boxes  # needed by predict() in tflite mode
sys.stdout.flush()
class YoloV4:
def __init__(self, FLAGS, interested_class: Union[dict, List] = None):
logging.set_verbosity(logging.WARNING)
# config = ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
# tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
logging.debug(f"Number of GPU: {gpus}")
try:
# Currently, memory growth needs to be the same across GPUs
assert FLAGS.gpu < len(gpus), "--gpu is higher than number of gpu available. " \
"(Choose integer less than or equal to {} or use -1)".format(len(gpus) - 1)
if FLAGS.gpu != -1:
tf.config.experimental.set_visible_devices(gpus[FLAGS.gpu], 'GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.debug("{} Physical GPUS, {} Logical GPUS".format(len(gpus), len(logical_gpus)))
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
self.tt_total = utils.TimeTracker(FLAGS.time_report_freq * 15, "YOLOV4 Total")
self.tt_infer = utils.TimeTracker(FLAGS.time_report_freq * 15, "Yolo Inference")
self.tt_suppress = utils.TimeTracker(FLAGS.time_report_freq * 15, "Yolo Suppress")
self.tt_filter = utils.TimeTracker(FLAGS.time_report_freq * 15, "Yolo Filter")
self.frame_id = 0
if len(gpus) == 0:
logging.warning('No GPU found')
self.strategy = tf.distribute.MirroredStrategy()
self.FLAGS = FLAGS
if type(interested_class) is list:
self.interested_class = {}
for i in interested_class:
self.interested_class[i] = i
else:
self.interested_class = interested_class
self.interpreter = None
with self.strategy.scope():
self.saved_model_loaded = None
self.infer = None
if FLAGS.framework == 'tflite':
self.interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
self.interpreter.allocate_tensors()
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
print("input details: ", self.input_details)
print("output details: ", self.output_details)
else:
self.saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
self.infer = self.saved_model_loaded.signatures['serving_default']
if FLAGS.debug:
logging.set_verbosity(logging.DEBUG)
else:
logging.set_verbosity(logging.INFO)
temp_image = np.random.randint(0, 255, (608, 608, 3))
temp_queue, temp_queue2 = Queue(), Queue()
temp_queue.put([temp_image, 0])
temp_queue.put([None, None])
self.predict_wrapper(temp_queue, temp_queue2, 1, Event())
def predict_wrapper(self, in_queue: Queue, out_queue: Queue, batch_size: int, stop_event: Event):
if self.FLAGS.framework == 'tflite':
raise ValueError("")
image_buffer, frame_id_buffer, image_copy_buffer = [], [], []
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(self.FLAGS)
input_size = self.FLAGS.size
stop_flag = False
while not stop_flag:
if stop_event.isSet(): # Clear data in queue if receiving stop signal
with in_queue.mutex:
in_queue.queue.clear()
break
# if in_queue.qsize() == 0:
# continue
# Fetching data
wait_start_time = time.time()
try:
frame, frame_id = in_queue.get(timeout=20)
except Empty:
logging.error("Yolo thread timeout. No image found for a certain time")
break
start_time = time.time()
# Add data into the list until the list has [batch_size] images
if frame is None:
stop_flag = True
else:
# self.wait_time_tracker.add(start_time - wait_start_time, frame_id)
frame_size = frame.shape[:2]
image_copy_buffer.append(frame.copy())
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
# image_data = image_data[np.newaxis, ...].astype(np.float32)
image_buffer.append(image_data)
frame_id_buffer.append(frame_id)
# Run detection when we have [batch_size] images, or has stop_flag (finish program)
if len(image_buffer) == batch_size or stop_flag:
if len(image_buffer) > 0:
image_data = np.stack(image_buffer).astype(np.float32)
batch_data = tf.constant(image_data)
infer_start = time.time()
pred_bbox = self.infer(batch_data)
infer_end = time.time()
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=self.FLAGS.iou,
score_threshold=self.FLAGS.score
)
pred_bboxes = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
non_max = time.time()
if self.interested_class is not None: # Filter only prediction of interested class
pred_bboxes = self.filter_class(pred_bboxes)
filter = time.time()
# Send prediction one by one
for i in range(np.shape(pred_bboxes[3])[0]):
pred_bbox = [pred_bboxes[0][i:i + 1, :, :], pred_bboxes[1][i:i + 1, :],
pred_bboxes[2][i:i + 1, :], pred_bboxes[3][i:i + 1]]
if not stop_event.isSet():
try:
out_queue.put([pred_bbox, frame_id_buffer[i], image_copy_buffer[i]], timeout=25)
except Full:
logging.error("bbox_queue: Read vid timeout. Post-processing might take too long")
break
end_time = time.time()
# self.tt_total.add(end_time - start_time, frame_id)
# self.tt_infer.add(infer_end - infer_start, frame_id)
# self.tt_suppress.add(non_max - infer_end, frame_id)
# self.tt_filter.add(filter - non_max, frame_id)
# Reset buffer after predictions
image_buffer, frame_id_buffer, image_copy_buffer = [], [], []
if stop_flag:
try:
out_queue.put([None, None, None], timeout=25)
except Full:
pass
logging.debug("Yolo thread complete")
def predict(self, frame):
"""
Predict with yolo
@param frame: ndarray(608,608,3) or list of ndarray of images
@return: list[4] of output : bboxes, scores, classes, num_rect or the list of list[4]
"""
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(self.FLAGS)
input_size = self.FLAGS.size
video_path = self.FLAGS.video
# frame_id = 0
# start_time = time.time()
if type(frame) is not list:
frame = [frame]
image_data = []
for index in range(len(frame)):
image = frame[index]
frame_size = image.shape[:2]
image = cv2.resize(image, (input_size, input_size))
image = image / 255.
# image = image[np.newaxis, ...].astype(np.float32)
image_data.append(image)
image_data = np.stack(image_data).astype(np.float32)
if self.FLAGS.framework == 'tflite':
self.interpreter.set_tensor(self.input_details[0]['index'], image_data)
self.interpreter.invoke()
pred = [self.interpreter.get_tensor(self.output_details[i]['index']) for i in
range(len(self.output_details))]
if self.FLAGS.model == 'yolov3' and self.FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
batch_data = tf.constant(image_data, name='batch_data')
pred_bbox = self.infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=self.FLAGS.iou,
score_threshold=self.FLAGS.score
)
pred_bboxes = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
if self.interested_class is not None:
pred_bboxes = self.filter_class(pred_bboxes)
# Send prediction one by one
out_pred_bbox = []
for i in range(np.shape(pred_bboxes[3])[0]):
pred_bbox = [pred_bboxes[0][i:i + 1, :, :], pred_bboxes[1][i:i + 1, :],
pred_bboxes[2][i:i + 1, :], pred_bboxes[3][i:i + 1]]
out_pred_bbox.append(pred_bbox)
self.frame_id += 1
# end_time = time.time()
# self.time_tracker.add(end_time - start_time, self.frame_id)
if len(out_pred_bbox) == 1:
return out_pred_bbox[0]
return out_pred_bbox
def filter_class(self, pred_bbox):
boxes_new, scores_new, classes_new, valid_detection_new = [], [], [], []
num_detection = np.shape(pred_bbox[2])[1]
num_batch = np.shape(pred_bbox[3])[0]
for batch in range(num_batch):
boxes_temp, scores_temp, classes_temp = [], [], []
current_detections = 0
for i in range(pred_bbox[3][batch]):
if int(pred_bbox[2][batch, i]) in self.interested_class:
boxes_temp.append(pred_bbox[0][batch, i, :])
scores_temp.append(pred_bbox[1][batch, i])
classes_temp.append(pred_bbox[2][batch, i])
current_detections += 1
for _ in range(num_detection - current_detections):
boxes_temp.append(np.zeros([4]))
scores_temp.append(0.0)
classes_temp.append(0.0)
# if current_detections == 0:
# boxes_new.append(np.zeros([1,4]))
# scores_new.append(np.zeros([1]))
# classes_new.append(np.zeros([0]))
# valid_detection_new.append(current_detections)
# # return ([np.zeros([1, 0, 4]), np.zeros([1, 0]), np.zeros([1, 0]), np.array([current_detections])])
# else:
boxes_new.append(np.stack(boxes_temp))
scores_new.append(np.stack(scores_temp))
classes_new.append(np.stack(classes_temp))
valid_detection_new.append(current_detections)
# return [np.expand_dims(np.stack(boxes_temp), axis=0), np.expand_dims(np.stack(scores_temp), axis=0),
# np.expand_dims(np.stack(classes_temp), axis=0), np.array([current_detections])]
return [np.stack(boxes_new), np.stack(scores_new), np.stack(classes_new), np.array(valid_detection_new)]
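# Hedged usage sketch (FLAGS fields and the class ids below are assumptions, not from this repo):
# yolo = YoloV4(FLAGS, interested_class=[0, 2])   # e.g. keep only classes 0 and 2
# pred_bbox = yolo.predict(frame)                 # -> [boxes, scores, classes, valid_detections]
# frame = draw_bbox(frame, pred_bbox, classes=class_names)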
def draw_bbox(frame, pred_bbox, boundary=None, classes=None):
"""
Draw bounding box on yolov4 output
@param frame:
@param pred_bbox: Direct output from yolov4
@param boundary: WarpMatrix class. Show only bbox in boundary. Give None to show all bbox in the image
@param classes: List of full class name
@return:
"""
return utils.draw_bbox(frame, pred_bbox, classes=classes, show_label=True, boundary=boundary)
``` |
{
"source": "jobpassion/Surf",
"score": 3
} |
#### File: Surf/Surf-Mac/genjson.py
```python
import json
import sys
import re
config = {}
General = {}
Proxy = {}
Rule = {}
DOMAINKEYWORD = {}
DOMAINSUFFIX = {}
IPCIDR = {}
def fread(file):
dict = {}
i = 0
for line in file:
        i = i + 1
        print i
if re.match('#', line):
print "# and pass"
continue
if re.match('//', line):
print "// and pass"
continue
if len(line) <=2:
print "no need" + line
continue
if re.match('\[General\]', line):
print "Found General"
dict = General
continue
elif re.match('\[Proxy\]', line):
print "Found Proxy"
dict = Proxy
continue
elif re.match('\[Rule\]', line):
dict = Rule
print "Found Proxy"
continue
else :
#print "Not found block this is rule" +
pass
#print line
list = line.split('=')
if len(list) >1:
print list
x = list[1].split(',')
if len(x)> 1:
if dict == Proxy:
hostconfig = {}
hostconfig['protocol'] = x[0].strip()
hostconfig['host'] = x[1].strip()
hostconfig['port'] = x[2].strip()
hostconfig['methd'] = x[3].strip()
hostconfig['passwd'] = x[4].strip()
#hostconfig['xx'] = x[5]
dict[list[0]] = hostconfig
else:
print line
dict[list[0]] = [str(j).strip() for j in x]
else:
dict[list[0]] = list[1]
else:
if re.match('DOMAIN-KEYWORD',line):
k = line.split(',')
#k.remove(k[0])
#r = ', '.join([str(x) for x in k])
rule = {}
rule["Proxy"] = k[2].strip()
try:
rule["force-remote-dns"] = k[3].strip()
except Exception, e:
print e
DOMAINKEYWORD[k[1]] = rule
elif re.match('DOMAIN-SUFFIX',line):
k = line.split(',')
#k.remove(k[0])
#r = ', '.join([str(x) for x in k])
rule = {}
rule["Proxy"] = k[2].strip()
try:
rule["force-remote-dns"] = k[3].strip()
except Exception, e:
print e
DOMAINSUFFIX[k[1]] = rule
elif re.match('IP-CIDR',line):
k = line.split(',')
#k.remove(k[0])
#r = ', '.join([str(x) for x in k])
rule = {}
rule["Proxy"] = k[2].strip()
try:
rule["no-resolve"] = k[3].strip()
except Exception, e:
print e
IPCIDR[k[1]] = rule
else:
pass
#print dict
print "[General]"
print General
General["author"] = "yarshure"
General["commnet"] = "这是comment"
print "[Proxy]"
print Proxy
print "[Rule]"
Rule["DOMAIN-KEYWORD"] = DOMAINKEYWORD
Rule["DOMAIN-SUFFIX"] = DOMAINSUFFIX
Rule["IP-CIDR"] = IPCIDR
#print Rule
print "cool"
config["General"] = General
config["Proxy"] = Proxy
config["Rule"] = Rule
saveRuslt()
# print "[DOMAINKEYWORD]"
# print DOMAINKEYWORD
# print "[DOMAINSUFFIX]"
# print DOMAINSUFFIX
# print "[IPCIDR]"
# print IPCIDR
def saveRuslt():
#print config
s = json.dumps(config)
f = open("surf.conf","w")
f.write(s)
f.close()
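# Illustrative mapping (the rule line below is made up): a Surge line such as
#   DOMAIN-SUFFIX,google.com,Proxy,force-remote-dns
# ends up in the JSON as
#   Rule["DOMAIN-SUFFIX"]["google.com"] = {"Proxy": "Proxy", "force-remote-dns": "force-remote-dns"}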
if __name__ == '__main__':
if len(sys.argv) == 1:
print "add surge file path"
surgeconfig = sys.argv[1]
print surgeconfig
file = open(surgeconfig)
fread(file)
file.close()
``` |
{
"source": "job/peeringdb",
"score": 2
} |
#### File: peeringdb/tests/test_locale_files.py
```python
import os
import re
from django.test import TestCase, Client
from django.conf import settings
from peeringdb_server.models import Organization, User
from string import Formatter
class LocaleFilesTest(TestCase):
def load_messages(self, language, filename="django.po"):
path = os.path.join(
os.path.dirname(__file__), "..", "locale", language, "LC_MESSAGES"
)
with open(os.path.join(path, filename), "r") as fh:
content = fh.read()
message_id = re.findall(r"\nmsgid (.+)\n", content)
message_str = re.findall(r"\nmsgstr (.+)\n", content)
messages = dict(zip(message_id, message_str))
return messages
# weblate handles all this now, and these tests are failing
# atm because the locale files no longer reside here
#
# weblate also makes sure that variable formatting matches, so this
# test is somewhat redundant at this point.
#
# either need to redo this test and make sure it generates the locale
# or remove it.
def _test_pt(self):
"""
Test portuguese locale files
"""
self.assert_variables(
self.load_messages("en_US"), self.load_messages("pt"), "PT"
)
self.assert_variables(
self.load_messages("en_US", filename="djangojs.po"),
self.load_messages("pt", filename="djangojs.po"),
"PT",
)
def assert_variables(self, en_messages, other_messages, language):
"""
Assert that the correct formatting variables exist
"""
errors = 0
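        # Example of a mismatch this loop catches: msgid "Hello %(name)s" translated as
        # "Bonjour %s" is reported, because the placeholder sets differ.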
for msgid, msgstr in en_messages.items():
# %(name)s and %s type variables
variables_a = sorted(re.findall("%\([^\(]+\)s|%s", msgid))
variables_b = sorted(re.findall("%\([^\(]+\)s|%s", other_messages[msgid]))
if variables_a != variables_b:
errors += 1
print "{} Locale variable error at msgid {} -> {}".format(
language, msgid, other_messages[msgid]
)
# {name} and {} type variables
            variables_a = sorted(
                [fn for _, fn, _, _ in Formatter().parse(msgid) if fn is not None]
            )
            variables_b = sorted(
                fn
                for _, fn, _, _ in Formatter().parse(other_messages[msgid])
                if fn is not None
            )
            if variables_a != variables_b:
                errors += 1
                print(
                    "{} Locale variable error at msgid {} -> {}".format(
                        language, msgid, other_messages[msgid]
                    )
                )
assert errors == 0
``` |
{
"source": "JobPetrovcic/branje-izdelkov",
"score": 2
} |
#### File: JobPetrovcic/branje-izdelkov/branje_kataloga.py
```python
from branje_strani import shrani_stran, nalozi_stran_iz_datoteke
MAPA_KATALOGA = 'katalog'
def dobi_ime_strani_indeks(indeks):
    # use a forward slash so the path works on all platforms (the original backslash
    # only worked because '\s' is not a recognised escape sequence)
    return f'{MAPA_KATALOGA}/stran_{indeks}.html'
# insert the index into the page URL template and save the page at that URL
OSNOVA_SPAR_STRANI = 'https://www.spar.si'
VZOREC_STRANI = OSNOVA_SPAR_STRANI + '/online/c/root/?_=1635264522253&callback=parseResponse&category=root&i=1&m_sortProdResults_egisp=a&page={stevilka_strani}&pos=81701&q=*&sort=product-ecr-sortlev&sp_cs=UTF-8&sp_q_12=81701&sp_q_exact_14=root&sp_x_12=product-visible-pos'
def shrani_stran_indeks(indeks):
shrani_stran(VZOREC_STRANI.format(stevilka_strani=indeks), dobi_ime_strani_indeks(indeks))
# fetch and save all 255 pages (that is how many there were when this was written).
STEVILO_VSEH_STRANI_SPAR=255
def shrani_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
for i in range(1, stevilo_strani + 1):
shrani_stran_indeks(i)
def nalozi_vse_strani_kataloga(stevilo_strani=STEVILO_VSEH_STRANI_SPAR):
vse_strani = []
for i in range(1, stevilo_strani+1):
vse_strani += [nalozi_stran_iz_datoteke(dobi_ime_strani_indeks(i))]
return vse_strani
# check whether a link really points to a product (product links are recognised by the /p/ segment)
import re
def je_povezava_do_produkta(povezava):
return re.search('\/online\/[\w\-]+\/p[\/|$]', povezava) is not None
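# Illustrative behaviour (the URLs below are made up; only the /p/ path segment matters):
# je_povezava_do_produkta('/online/mleko-1l/p/123456')   -> True
# je_povezava_do_produkta('/online/c/sadje-zelenjava')   -> False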
# from a requests_html HTML object, read all links on the page that represent individual products
def poberi_povezave_do_produkta(html_objekt):
vse_povezave = html_objekt.links
povezave_do_produkta = []
for povezava in vse_povezave:
if je_povezava_do_produkta(povezava):
povezave_do_produkta+=[OSNOVA_SPAR_STRANI + povezava]
return povezave_do_produkta
def zdruzi_sezname(seznami):
zdruzen = []
for seznam in seznami:
for element in seznam:
zdruzen += [element]
return list(set(zdruzen))
def poberi_povezave_seznam(seznam_html_objektov):
seznam_seznamov = []
for html_objekt in seznam_html_objektov:
seznam_seznamov += [poberi_povezave_do_produkta(html_objekt)]
return zdruzi_sezname(seznam_seznamov)
# read all links from the files saved in the catalog folder and merge them without duplicates
def obdelaj_vse_strani_kataloga():
vse_strani = nalozi_vse_strani_kataloga()
return poberi_povezave_seznam(vse_strani)
import csv
DATOTEKA_VSEH_POVEZAV_KATALOGA = 'vse_povezave_do_produkta.csv'
def shrani_povezave_kataloga(nalozi_strani_iz_interneta=False):
if nalozi_strani_iz_interneta:
shrani_vse_strani_kataloga()
vse_povezave = obdelaj_vse_strani_kataloga()
with open(DATOTEKA_VSEH_POVEZAV_KATALOGA, 'w') as datoteka:
zapis = csv.writer(datoteka, delimiter='\n')
zapis.writerow(vse_povezave)
``` |
{
"source": "JobPetrovcic/Projektna-naloga",
"score": 3
} |
#### File: Projektna-naloga/CMV/bralnik.py
```python
from wand.image import Image as WandImage
from PIL import Image
import os
import io
import pytesseract
# example:
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# put the path to your Tesseract installation here
# NOTE: this function is copied from the receipt-parser python module
def ocr_image(input_file, language, sharpen=False):
"""
:param input_file: str
Path to image to prettify
:return: str
"""
with io.BytesIO() as transfer:
with WandImage(filename=input_file) as img:
if sharpen:
img.auto_level()
img.sharpen(radius=0, sigma=4.0)
img.contrast()
img.save(transfer)
with Image.open(transfer) as img:
return pytesseract.image_to_string(img, lang=language)
dovoljeni_formati = ('.jpg', '.png')
# TIP: the image should be black and white, with as much contrast and resolution as possible,
# and the receipt should be "fresh", since the ink fades over time
def dobi_besedilo(ime_datoteke):
if(isinstance(ime_datoteke, str)):
        # check that the image is in an allowed format
ime, koncnica = os.path.splitext(ime_datoteke)
if koncnica in dovoljeni_formati:
try:
return ocr_image(
ime_datoteke, "slv", True).lower().splitlines()
except BaseException:
raise Exception(
"Nekaj je šlo narobe pri branju besedila iz slike: " +
ime_datoteke)
else:
raise ValueError(
f"Slika mora biti v {dovoljeni_formati} formatih.")
else:
raise ValueError("Ime datoteke ni niz.")
```
#### File: Projektna-naloga/CMV/model_uporabnikov.py
```python
import hashlib
import json
import random
import os
import model
class Uporabnik:
def __init__(
self,
uporabnisko_ime,
zasifrirano_geslo,
nakup,
stevilo_racunov=0):
self.uporabnisko_ime = uporabnisko_ime
self.zasifrirano_geslo = zasifrirano_geslo
self.nakup = nakup
self.stevilo_racunov = stevilo_racunov
@staticmethod
def prijava(uporabnisko_ime, geslo_v_cistopisu):
uporabnik = Uporabnik.iz_datoteke(uporabnisko_ime)
if uporabnik is None:
raise ValueError("Uporabniško ime ne obstaja")
elif uporabnik.preveri_geslo(geslo_v_cistopisu):
return uporabnik
else:
raise ValueError("Geslo je napačno")
@staticmethod
def ustvari_mapo(uporabnisko_ime):
os.mkdir(f"uporabniki/{uporabnisko_ime}")
os.mkdir(f"uporabniki/{uporabnisko_ime}/slike_racunov")
@staticmethod
def ime_racuna(uporabnisko_ime, indeks_racuna, koncnica):
if koncnica[0] == '.':
return f"uporabniki/{uporabnisko_ime}/slike_racunov/racun{indeks_racuna}{koncnica}"
else:
return f"uporabniki/{uporabnisko_ime}/slike_racunov/racun{indeks_racuna}.{koncnica}"
@staticmethod
def registracija(uporabnisko_ime, geslo_v_cistopisu):
if Uporabnik.iz_datoteke(uporabnisko_ime) is not None:
raise ValueError("Uporabniško ime že obstaja")
else:
Uporabnik.ustvari_mapo(uporabnisko_ime)
zasifrirano_geslo = Uporabnik._zasifriraj_geslo(geslo_v_cistopisu)
uporabnik = Uporabnik(
uporabnisko_ime,
zasifrirano_geslo,
model.Nakup())
uporabnik.v_datoteko()
return uporabnik
def _zasifriraj_geslo(geslo_v_cistopisu, sol=None):
if sol is None:
sol = str(random.getrandbits(32))
posoljeno_geslo = sol + geslo_v_cistopisu
h = hashlib.blake2b()
h.update(posoljeno_geslo.encode(encoding="utf-8"))
return f"{sol}${h.hexdigest()}"
def v_slovar(self):
return {
"uporabnisko_ime": self.uporabnisko_ime,
"zasifrirano_geslo": self.zasifrirano_geslo,
"nakup": self.nakup.v_slovar(),
"stevilo_racunov": self.stevilo_racunov
}
def v_datoteko(self):
with open(
Uporabnik.ime_uporabnikove_datoteke(self.uporabnisko_ime), "w+"
) as datoteka:
json.dump(self.v_slovar(), datoteka, ensure_ascii=True, indent=4)
def preveri_geslo(self, geslo_v_cistopisu):
sol, _ = self.zasifrirano_geslo.split("$")
return self.zasifrirano_geslo == Uporabnik._zasifriraj_geslo(
geslo_v_cistopisu, sol)
@staticmethod
def ime_uporabnikove_datoteke(uporabnisko_ime):
return f"uporabniki/{uporabnisko_ime}/{uporabnisko_ime}.json"
@staticmethod
def iz_slovarja(slovar):
uporabnisko_ime = slovar["uporabnisko_ime"]
zasifrirano_geslo = slovar["zasifrirano_geslo"]
stevilo_racunov = slovar["stevilo_racunov"]
nakup = model.Nakup.iz_slovarja(slovar["nakup"])
return Uporabnik(
uporabnisko_ime,
zasifrirano_geslo,
nakup,
stevilo_racunov)
@staticmethod
def iz_datoteke(uporabnisko_ime):
try:
with open(Uporabnik.ime_uporabnikove_datoteke(uporabnisko_ime)) as datoteka:
slovar = json.load(datoteka)
return Uporabnik.iz_slovarja(slovar)
except FileNotFoundError:
return None
``` |
{
"source": "JobQiu/EloMerchantKaggle",
"score": 3
} |
#### File: EloMerchantKaggle/model/basic_model.py
```python
from abc import abstractmethod
import lightgbm as lgb
from sklearn import model_selection
from preprocess.read_data import read_data
import json
from util.util import send_msg, map_list_combination
from tqdm import tqdm
class Model(object):
"""
it would be easier for fine-tuning later if all the models following this interface
"""
@abstractmethod
def __init__(self):
"""
:param n_kFold:
:param read_data_version:
:param random_state:
:param shuffle:
"""
self.so_far_best_rmse = 1000
self.so_far_best_params = None
pass
@abstractmethod
def train(self, params_list):
"""
iterate all the params combination to fine tune the model
:param params_list:
:return:
"""
list_params = map_list_combination(params_list)
for params in tqdm(list_params):
print("Current Params:{}".format(json.dumps(params)))
self._train(params)
print("------------- Train done --------------")
print("The best rmse score is : {}".format(self.so_far_best_rmse))
print("The corresponding params is: {}".format(json.dumps(self.so_far_best_params)))
pass
@abstractmethod
def _train(self, params):
"""
train the model according to a certain params,
compare the valid score with the global best valid score, and if better, update the best score and best params
:param params:
:return: prediction of test set corresponding to this params
"""
pass
@abstractmethod
def predict(self):
"""
:return: prediction of test set corresponding to the best params
"""
pass
@abstractmethod
def get_params_example(self):
"""
:return: return an example params, to be clear about what variables we can set
"""
pass
class LightGBMBasicModel(Model):
def __init__(self, n_kFold=10,
read_data_version='1.0',
random_state=2018,
shuffle=True,
data_dir="/content/EloMerchantKaggle/data",
verbose_eval=False,
so_far_best_rmse_threshold=4):
super(LightGBMBasicModel, self).__init__()
self.n_kFold = n_kFold
self.read_data_version = read_data_version
self.random_state = random_state
self.shuffle = shuffle
self.verbose_eval = verbose_eval
self.train_X, self.test_X, self.train_y = read_data(data_dir=data_dir,
version=self.read_data_version)
# during the fine tuning, we will try different combinations, and save the best score and params
self.so_far_best_rmse = so_far_best_rmse_threshold
self.so_far_best_params = None
pass
def predict(self):
return self._train(self.so_far_best_params, predict=True)
def _train(self, params=None, predict=False):
        if params is None:
params = self.get_params_example()
eval_score = 0
pred_test = None
if predict:
pred_test = 0
# 1. k fold
kf = model_selection.KFold(n_splits=self.n_kFold, random_state=self.random_state, shuffle=self.shuffle)
for dev_index, val_index in kf.split(self.train_X):
# 2. train on subset
dev_X, val_X = self.train_X.loc[dev_index, :], self.train_X.loc[val_index, :]
dev_y, val_y = self.train_y[dev_index], self.train_y[val_index]
pred_test_tmp, model, evals_result = self.run_lgb(dev_X, dev_y, val_X, val_y, self.test_X, params)
eval_score += min(evals_result['valid_0']['rmse'])
if predict:
pred_test += pred_test_tmp
# 3. compare scores
eval_score = eval_score / (1.0 * self.n_kFold)
if eval_score < self.so_far_best_rmse:
print("Find better score {}".format(eval_score))
send_msg("Find better score {}".format(eval_score))
self.so_far_best_rmse = eval_score
self.so_far_best_params = params
if predict:
pred_test /= (self.n_kFold * 1.0)
return pred_test
def run_lgb(self, train_X, train_y, val_X, val_y, test_X, params):
lgtrain = lgb.Dataset(train_X, label=train_y)
lgval = lgb.Dataset(val_X, label=val_y)
evals_result = {}
model = lgb.train(params,
lgtrain,
1000,
valid_sets=[lgval],
early_stopping_rounds=100,
verbose_eval=self.verbose_eval,
evals_result=evals_result)
pred_test_y = model.predict(test_X, num_iteration=model.best_iteration)
return pred_test_y, model, evals_result
def get_params_example(self):
params = {
"objective": "regression",
"metric": "rmse",
"num_leaves": 30,
"min_child_weight": 50,
"learning_rate": 0.05,
"bagging_fraction": 0.7,
"feature_fraction": 0.7,
"bagging_frequency": 5,
"bagging_seed": 2018,
"verbosity": -1
}
return params
class XGBoostModel(Model):
pass
class LGBMModel(Model):
pass
class CatBoostModel(Model):
pass
if __name__ == "__main__":
model = LightGBMBasicModel()
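    # Hedged usage sketch (assumes map_list_combination expands a dict of candidate value
    # lists into every combination; the grid values below are illustrative, not from this repo):
    # example_grid = {
    #     "objective": ["regression"],
    #     "metric": ["rmse"],
    #     "num_leaves": [30, 50],
    #     "learning_rate": [0.05, 0.01],
    # }
    # model.train(example_grid)
    # predictions = model.predict()   # re-runs K-fold CV with the best params found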
# %%
```
#### File: EloMerchantKaggle/preprocess/read_data_congmin.py
```python
import pandas as pd
import datetime
import gc
import numpy as np
import featuretools as ft
import os
from util.util import compress_int, send_msg
class DataSet(object):
def __init__(self, data_dir='/content/EloMerchantKaggle/data/'):
self.data_dir = data_dir
self.train_x_path = os.path.join(self.data_dir, 'x_train_agg')
self.test_x_path = os.path.join(self.data_dir, 'x_test_agg')
self.train_y_path = os.path.join(self.data_dir, 'y_train')
pass
def get_train_dataset(self, reset=False, load=True):
if load and os.path.isfile(self.train_x_path) and os.path.isfile(self.train_y_path):
return pd.read_csv(self.train_x_path), pd.read_csv(self.train_y_path)
train_df, hist_df_train, new_trans_df_train = split_trans_into_train_test(data_dir=self.data_dir,
reset=reset)
return agg(train_df, hist_df_train, new_trans_df_train, True, self.train_x_path, self.train_y_path)
def get_test_dataset(self, load=True):
if load and os.path.isfile(self.test_x_path):
return pd.read_csv(self.test_x_path), None
print("loading test.csv ...")
d = {'feature_1': np.uint8, 'feature_2': np.uint8, 'feature_3': np.bool_}
test_df = pd.read_csv(os.path.join(self.data_dir, "test.csv"), parse_dates=["first_active_month"], dtype=d)
test_df.info(memory_usage='deep')
hist_df_test = pd.read_csv(os.path.join(self.data_dir, "historical_transactions_test.csv"),
parse_dates=["purchase_date"])
hist_df_test = compress_int(hist_df_test)
new_trans_df_test = pd.read_csv(os.path.join(self.data_dir, "new_merchant_transactions_test.csv"),
parse_dates=["purchase_date"])
new_trans_df_test = compress_int(new_trans_df_test)
send_msg("load done")
return agg(test_df, hist_df_test, new_trans_df_test, False, self.test_x_path, None)
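# Hedged usage sketch (the path is the default used above):
# ds = DataSet(data_dir='/content/EloMerchantKaggle/data/')
# x_train, y_train = ds.get_train_dataset()   # aggregated features + target
# x_test, _ = ds.get_test_dataset()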
def agg(train_df, hist_df, new_trans_df, isTrain, x_save_path, y_save_path):
train_df = train_df.copy(deep=True)
if isTrain:
target = train_df['target']
del train_df['target']
else:
target = None
es_train = ft.EntitySet(id='es_train')
es_train = es_train.entity_from_dataframe(entity_id='train', dataframe=train_df,
index='', time_index='first_active_month')
es_train = es_train.entity_from_dataframe(entity_id='history', dataframe=hist_df,
index='', time_index='purchase_date')
es_train = es_train.entity_from_dataframe(entity_id='new_trans', dataframe=new_trans_df,
index='', time_index='purchase_date')
# Relationship between clients and previous loans
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['history']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['new_trans']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
print(" dfs ing ... ")
x_train, _ = ft.dfs(entityset=es_train,
target_entity='train',
max_depth=2)
send_msg("dfs done! ")
print("saving...")
    if target is not None:  # truth-testing a Series raises ValueError
        target.to_csv(y_save_path)
        x_train['index'] = target.index
        x_train = x_train.set_index('index')  # set_index returns a new frame
    x_train.to_csv(x_save_path)
return x_train, target
def split_trans_into_train_test(data_dir='/content/EloMerchantKaggle/data/', reset=False):
d = {'feature_1': np.uint8, 'feature_2': np.uint8, 'feature_3': np.bool_}
print("loading train.csv ...")
train_df = pd.read_csv(os.path.join(data_dir, "train.csv"), parse_dates=["first_active_month"], dtype=d)
train_df.info(memory_usage='deep')
if not reset and os.path.isfile(os.path.join(data_dir, "historical_transactions_train.csv")) and os.path.isfile(
os.path.join(data_dir, "new_merchant_transactions_train.csv")):
hist_df_train = pd.read_csv(os.path.join(data_dir, "historical_transactions_train.csv"),
parse_dates=["purchase_date"])
hist_df_train = compress_int(hist_df_train)
new_trans_df_train = pd.read_csv(os.path.join(data_dir, "new_merchant_transactions_train.csv"),
parse_dates=["purchase_date"])
new_trans_df_train = compress_int(new_trans_df_train)
send_msg("load done")
return train_df, hist_df_train, new_trans_df_train
pass
print("loading test.csv ...")
test_df = pd.read_csv(os.path.join(data_dir, "test.csv"), parse_dates=["first_active_month"], dtype=d)
test_df.info(memory_usage='deep')
print("loading historical_transactions.csv ...")
hist_df = pd.read_csv(os.path.join(data_dir, "historical_transactions.csv"), parse_dates=["purchase_date"])
print(' compressing ...')
hist_df = compressByDType(hist_df)
print(' split to get train hist ...')
hist_df_train = hist_df[hist_df.card_id.isin(set(train_df['card_id'].unique()))]
print(' saving ... ')
hist_df_train.to_csv(os.path.join(data_dir, "historical_transactions_train.csv"))
print(' split to get test hist ...')
hist_df_test = hist_df[hist_df.card_id.isin(set(test_df['card_id'].unique()))]
print(' saving ... ')
hist_df_test.to_csv(os.path.join(data_dir, "historical_transactions_test.csv"))
del hist_df_test
del hist_df
gc.collect()
print("loading new_merchant_transactions.csv ...")
new_trans_df = pd.read_csv(os.path.join(data_dir, "new_merchant_transactions.csv"),
parse_dates=["purchase_date"])
print(' compressing ...')
new_trans_df = compressByDType(new_trans_df)
print(' split to get train new trans ...')
new_trans_df_train = new_trans_df[new_trans_df.card_id.isin(set(train_df['card_id'].unique()))]
print(' saving ... ')
new_trans_df_train.to_csv(os.path.join(data_dir, "new_merchant_transactions_train.csv"))
print(' split to get test new trans ...')
new_trans_df_test = new_trans_df[new_trans_df.card_id.isin(set(test_df['card_id'].unique()))]
print(' saving ... ')
new_trans_df_test.to_csv(os.path.join(data_dir, "new_merchant_transactions_test.csv"))
del new_trans_df_test
del new_trans_df
gc.collect()
send_msg("split and save done")
return train_df, hist_df_train, new_trans_df_train
def agg2(df_train, df_test, df_hist_trans):
aggs = {}
for col in ['month', 'hour', 'weekofyear', 'dayofweek', 'year', 'subsector_id', 'merchant_category_id']:
aggs[col] = ['nunique']
aggs['purchase_amount'] = ['sum', 'max', 'min', 'mean', 'var']
aggs['installments'] = ['sum', 'max', 'min', 'mean', 'var']
aggs['purchase_date'] = ['max', 'min']
aggs['month_lag'] = ['max', 'min', 'mean', 'var']
aggs['month_diff'] = ['mean']
aggs['authorized_flag'] = ['sum', 'mean']
aggs['weekend'] = ['sum', 'mean']
aggs['category_1'] = ['sum', 'mean']
aggs['card_id'] = ['size']
for col in ['category_2', 'category_3']:
df_hist_trans[col + '_mean'] = df_hist_trans.groupby([col])['purchase_amount'].transform('mean')
aggs[col + '_mean'] = ['mean']
new_columns = get_new_columns('hist', aggs)
df_hist_trans_group = df_hist_trans.groupby('card_id').agg(aggs)
df_hist_trans_group.columns = new_columns
df_hist_trans_group.reset_index(drop=False, inplace=True)
df_hist_trans_group['hist_purchase_date_diff'] = (
df_hist_trans_group['hist_purchase_date_max'] - df_hist_trans_group['hist_purchase_date_min']).dt.days
df_hist_trans_group['hist_purchase_date_average'] = df_hist_trans_group['hist_purchase_date_diff'] / \
df_hist_trans_group['hist_card_id_size']
df_hist_trans_group['hist_purchase_date_uptonow'] = (
datetime.datetime.today() - df_hist_trans_group['hist_purchase_date_max']).dt.days
df_train = df_train.merge(df_hist_trans_group, on='card_id', how='left')
df_test = df_test.merge(df_hist_trans_group, on='card_id', how='left')
del df_hist_trans_group
gc.collect()
return df_train, df_test
def get_new_columns(name, aggs):
return [name + '_' + k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
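# e.g. get_new_columns('hist', {'purchase_amount': ['sum', 'max']})
#      -> ['hist_purchase_amount_sum', 'hist_purchase_amount_max']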
def compressByDType(df_new_merchant_trans):
"""
:param df_new_merchant_trans:
:return:
"""
df_new_merchant_trans = df_new_merchant_trans.drop(columns=['merchant_id'])
df_new_merchant_trans['category_2'].fillna(1.0, inplace=True)
df_new_merchant_trans['category_3'].fillna('D', inplace=True)
df_new_merchant_trans['authorized_flag'].fillna('Y', inplace=True)
df_new_merchant_trans['authorized_flag'] = df_new_merchant_trans['authorized_flag'].map({'Y': 1, 'N': 0})
df_new_merchant_trans['category_1'] = df_new_merchant_trans['category_1'].map({'Y': 1, 'N': 0})
df_new_merchant_trans['category_3'] = df_new_merchant_trans['category_3'].map({'A': 0, 'B': 1, 'C': 2, 'D': 3})
df_new_merchant_trans['category_1'] = pd.to_numeric(df_new_merchant_trans['category_1'], downcast='integer')
df_new_merchant_trans['category_2'] = pd.to_numeric(df_new_merchant_trans['category_2'], downcast='integer')
df_new_merchant_trans['category_3'] = pd.to_numeric(df_new_merchant_trans['category_3'], downcast='integer')
df_new_merchant_trans['merchant_category_id'] = pd.to_numeric(df_new_merchant_trans['merchant_category_id'],
downcast='integer')
df_new_merchant_trans['authorized_flag'] = pd.to_numeric(df_new_merchant_trans['authorized_flag'],
downcast='integer')
df_new_merchant_trans['city_id'] = pd.to_numeric(df_new_merchant_trans['city_id'], downcast='integer')
df_new_merchant_trans['installments'] = pd.to_numeric(df_new_merchant_trans['installments'], downcast='integer')
df_new_merchant_trans['state_id'] = pd.to_numeric(df_new_merchant_trans['state_id'], downcast='integer')
df_new_merchant_trans['subsector_id'] = pd.to_numeric(df_new_merchant_trans['subsector_id'], downcast='integer')
df_new_merchant_trans['month_lag'] = pd.to_numeric(df_new_merchant_trans['month_lag'], downcast='integer')
df_new_merchant_trans['purchase_date'] = pd.to_datetime(df_new_merchant_trans['purchase_date'])
df_new_merchant_trans['year'] = df_new_merchant_trans['purchase_date'].dt.year
df_new_merchant_trans['weekofyear'] = df_new_merchant_trans['purchase_date'].dt.weekofyear
df_new_merchant_trans['month'] = df_new_merchant_trans['purchase_date'].dt.month
df_new_merchant_trans['dayofweek'] = df_new_merchant_trans['purchase_date'].dt.dayofweek
df_new_merchant_trans['weekend'] = (df_new_merchant_trans.purchase_date.dt.weekday >= 5).astype(int)
df_new_merchant_trans['hour'] = df_new_merchant_trans['purchase_date'].dt.hour
# https://www.kaggle.com/c/elo-merchant-category-recommendation/discussion/73244
df_new_merchant_trans['month_diff'] = ((datetime.datetime.today() - df_new_merchant_trans[
'purchase_date']).dt.days) // 30
df_new_merchant_trans['month_diff'] += df_new_merchant_trans['month_lag']
df_new_merchant_trans['weekofyear'] = pd.to_numeric(df_new_merchant_trans['weekofyear'], downcast='integer')
df_new_merchant_trans['month'] = pd.to_numeric(df_new_merchant_trans['month'], downcast='integer')
df_new_merchant_trans['dayofweek'] = pd.to_numeric(df_new_merchant_trans['dayofweek'], downcast='integer')
df_new_merchant_trans['weekend'] = pd.to_numeric(df_new_merchant_trans['weekend'], downcast='integer')
df_new_merchant_trans['hour'] = pd.to_numeric(df_new_merchant_trans['hour'], downcast='integer')
df_new_merchant_trans['month_diff'] = pd.to_numeric(df_new_merchant_trans['month_diff'], downcast='integer')
df_new_merchant_trans.info(memory_usage='deep')
return df_new_merchant_trans
def read_data_c2(train_df,
test_df,
hist_df,
new_trans_df):
target = train_df['target']
del train_df['target']
es_train = ft.EntitySet(id='es_train')
es_test = ft.EntitySet(id='es_test')
es_train = es_train.entity_from_dataframe(entity_id='train', dataframe=train_df,
index='client_id', time_index='joined')
es_train = es_train.entity_from_dataframe(entity_id='history', dataframe=hist_df,
index='', time_index='purchase_date')
es_train = es_train.entity_from_dataframe(entity_id='new_trans', dataframe=new_trans_df,
index='', time_index='purchase_date')
# Relationship between clients and previous loans
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['history']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
r_client_previous = ft.Relationship(es_train['train']['card_id'],
es_train['new_trans']['card_id'])
# Add the relationship to the entity set
es_train = es_train.add_relationship(r_client_previous)
x_train, feature_names = ft.dfs(entityset=es_train, target_entity='train',
max_depth=2)
es_test = es_test.entity_from_dataframe(entity_id='test', dataframe=train_df,
index='client_id', time_index='joined')
es_test = es_test.entity_from_dataframe(entity_id='history', dataframe=hist_df,
index='', time_index='purchase_date')
es_test = es_test.entity_from_dataframe(entity_id='new_trans', dataframe=new_trans_df,
index='', time_index='purchase_date')
# Relationship between clients and previous loans
r_client_previous = ft.Relationship(es_test['test']['card_id'],
es_test['history']['card_id'])
# Add the relationship to the entity set
es_test = es_test.add_relationship(r_client_previous)
r_client_previous = ft.Relationship(es_test['test']['card_id'],
es_test['new_trans']['card_id'])
# Add the relationship to the entity set
es_test = es_test.add_relationship(r_client_previous)
x_test, feature_names = ft.dfs(entityset=es_test, target_entity='test',
max_depth=2)
return x_train, target, x_test
def read_data_c(train_df,
test_df,
hist_df,
new_trans_df,
version='c1.0'):
"""
:param train_df:
:param test_df:
:param hist_df:
:param new_trans_df:
:param version:
:return:
"""
# 0. compress
print("compressing ... ")
hist_df = compressByDType(hist_df)
new_trans_df = compressByDType(new_trans_df)
print("compressing done")
if version == 'c2.0':
return read_data_c2(train_df,
test_df,
hist_df,
new_trans_df)
    # 1. merge them into one data frame
print("agg ...")
agg2(train_df, test_df, hist_df)
agg2(train_df, test_df, new_trans_df)
del hist_df
gc.collect()
del new_trans_df
gc.collect()
train_df['outliers'] = 0
train_df.loc[train_df['target'] < -30, 'outliers'] = 1
train_df['outliers'].value_counts()
for df in [train_df, test_df]:
df['first_active_month'] = pd.to_datetime(df['first_active_month'])
df['dayofweek'] = df['first_active_month'].dt.dayofweek
df['weekofyear'] = df['first_active_month'].dt.weekofyear
df['month'] = df['first_active_month'].dt.month
df['elapsed_time'] = (datetime.datetime.today() - df['first_active_month']).dt.days
df['hist_first_buy'] = (df['hist_purchase_date_min'] - df['first_active_month']).dt.days
df['new_hist_first_buy'] = (df['new_hist_purchase_date_min'] - df['first_active_month']).dt.days
for f in ['hist_purchase_date_max', 'hist_purchase_date_min', 'new_hist_purchase_date_max', \
'new_hist_purchase_date_min']:
df[f] = df[f].astype(np.int64) * 1e-9
df['card_id_total'] = df['new_hist_card_id_size'] + df['hist_card_id_size']
df['purchase_amount_total'] = df['new_hist_purchase_amount_sum'] + df['hist_purchase_amount_sum']
for f in ['feature_1', 'feature_2', 'feature_3']:
order_label = train_df.groupby([f])['outliers'].mean()
train_df[f] = train_df[f].map(order_label)
test_df[f] = test_df[f].map(order_label)
target = train_df['target']
del train_df['target']
return train_df, test_df, target
def read_data_c1(train_df,
test_df,
hist_df,
new_trans_df):
pass
# train_df, hist_df_train, new_trans_df_train = split_trans_into_train_test()
```
#### File: EloMerchantKaggle/preprocess/read_data.py
```python
import os
import pandas as pd
import numpy as np
import random
from collections import deque
import featuretools as ft
import copy
def read_data_z(data_dir="/content/EloMerchantKaggle/data",
version = '2',debug = False):
"""
"""
print("loading train.csv ..." )
train_df = pd.read_csv(os.path.join(data_dir, "train.csv"), parse_dates=["first_active_month"])
print("loading test.csv ..." )
test_df = pd.read_csv(os.path.join(data_dir, "test.csv"), parse_dates=["first_active_month"])
print("loading historical_transactions.csv ..." )
hist_df = pd.read_csv(os.path.join(data_dir, "historical_transactions.csv"))
print("loading new_merchant_transactions.csv ..." )
new_trans_df = pd.read_csv(os.path.join(data_dir, "new_merchant_transactions.csv"))
target_col = "target"
if debug:
train_df_debug = pd.DataFrame(columns=train_df.columns)
hist_df_debug = pd.DataFrame(columns=hist_df.columns)
new_trans_df_debug = pd.DataFrame(columns=new_trans_df.columns)
new_models = [train_df_debug,hist_df_debug,new_trans_df_debug]
old_models = [train_df,hist_df,new_trans_df]
seeds = [5,23,43]
        for i, data in enumerate(old_models):
            random.seed(seeds[i])
            sample_index = random.sample(range(data.shape[0]), int(data.shape[0] / 4))
            # iloc needs square brackets; take the whole sample at once instead of
            # appending row by row (DataFrame.append is not in-place)
            new_models[i] = data.iloc[sample_index].reset_index(drop=True)
            del data
        train_df, hist_df, new_trans_df = new_models
if version != '' and version.strip()[0] == '1':
pass
elif version != '' and version.strip()[0] == '2':
train_df_stat, test_df_stat = add_stat_features(train_df, test_df, hist_df, new_trans_df)
# Feature engineering: polynomial features
poly_features = train_df_stat[['feature_1','feature_2','feature_3']]
poly_features_test = test_df_stat[['feature_1','feature_2','feature_3']]
poly_target = train_df_stat[['target']]
from sklearn.preprocessing import PolynomialFeatures
poly_transform = PolynomialFeatures(degree=3)
poly_transform.fit(poly_features)
poly_transform.fit(poly_features_test)
poly_features = poly_transform.transform(poly_features)
poly_features_test = poly_transform.transform(poly_features_test)
# Create a dataframe of the features
poly_features = pd.DataFrame(poly_features,
columns = poly_transform.get_feature_names(['feature_1', 'feature_2',
'feature_3']))
        poly_features_test = pd.DataFrame(poly_features_test,  # was mistakenly built from the train features
                                          columns=poly_transform.get_feature_names(['feature_1', 'feature_2',
                                                                                     'feature_3']))
poly_features['card_id'] = train_df_stat['card_id']
poly_features_test['card_id'] = test_df_stat['card_id']
train_df_poly = pd.merge(train_df_stat,poly_features,on='card_id',how='left')
test_df_poly = pd.merge(test_df_stat,poly_features_test,on='card_id',how='left')
cols_to_use = [ "feature_1_x", "feature_2_x", "feature_3_x","year", "month",
"num_hist_transactions", "sum_hist_trans", "mean_hist_trans", "std_hist_trans",
"min_hist_trans", "max_hist_trans",
"num_merch_transactions", "sum_merch_trans", "mean_merch_trans", "std_merch_trans",
"min_merch_trans", "max_merch_trans",
]
print(cols_to_use)
cols_to_use.extend(poly_transform.get_feature_names(['feature_1', 'feature_2', 'feature_3'])[4:])
print(cols_to_use)
print(train_df_poly[:10])
return train_df_poly[cols_to_use] ,test_df_poly[cols_to_use], train_df_poly[target_col].values
else:
# default only use statistic features
cols_to_use = ["feature_1", "feature_2", "feature_3"]
return train_df[cols_to_use], test_df[cols_to_use], train_df[target_col].values
# add additional statistical features
def add_stat_features(train_df,test_df,hist_df,new_trans_df):
train_df_stat = train_df.copy(deep = True)
test_df_stat = test_df.copy(deep=True)
gdf = hist_df.groupby("card_id")
gdf = gdf["purchase_amount"].size().reset_index()
gdf.columns = ["card_id", "num_hist_transactions"]
train_df_stat = pd.merge(train_df_stat, gdf, on="card_id", how="left")
test_df_stat = pd.merge(test_df_stat, gdf, on="card_id", how="left")
bins = [0, 10, 20, 30, 40, 50, 75, 100, 150, 200, 500, 10000]
train_df_stat['binned_num_hist_transactions'] = pd.cut(train_df_stat['num_hist_transactions'], bins)
gdf = hist_df.groupby("card_id")
gdf = gdf["purchase_amount"].agg(['sum', 'mean', 'std', 'min', 'max']).reset_index()
gdf.columns = ["card_id", "sum_hist_trans", "mean_hist_trans", "std_hist_trans", "min_hist_trans", "max_hist_trans"]
train_df_stat = pd.merge(train_df_stat, gdf, on="card_id", how="left")
test_df_stat = pd.merge(test_df_stat, gdf, on="card_id", how="left")
bins = np.percentile(train_df_stat["sum_hist_trans"], range(0,101,10))
train_df_stat['binned_sum_hist_trans'] = pd.cut(train_df_stat['sum_hist_trans'], bins)
bins = np.percentile(train_df_stat["mean_hist_trans"], range(0,101,10))
train_df_stat['binned_mean_hist_trans'] = pd.cut(train_df_stat['mean_hist_trans'], bins)
gdf = new_trans_df.groupby("card_id")
gdf = gdf["purchase_amount"].size().reset_index()
gdf.columns = ["card_id", "num_merch_transactions"]
train_df_stat = pd.merge(train_df_stat, gdf, on="card_id", how="left")
test_df_stat = pd.merge(test_df_stat, gdf, on="card_id", how="left")
bins = [0, 10, 20, 30, 40, 50, 75, 10000]
train_df_stat['binned_num_merch_transactions'] = pd.cut(train_df_stat['num_merch_transactions'], bins)
gdf = new_trans_df.groupby("card_id")
gdf = gdf["purchase_amount"].agg(['sum', 'mean', 'std', 'min', 'max']).reset_index()
gdf.columns = ["card_id", "sum_merch_trans", "mean_merch_trans", "std_merch_trans", "min_merch_trans", "max_merch_trans"]
train_df_stat = pd.merge(train_df_stat, gdf, on="card_id", how="left")
test_df_stat = pd.merge(test_df_stat, gdf, on="card_id", how="left")
bins = np.nanpercentile(train_df_stat["sum_merch_trans"], range(0,101,10))
train_df_stat['binned_sum_merch_trans'] = pd.cut(train_df_stat['sum_merch_trans'], bins)
bins = np.nanpercentile(train_df_stat["mean_merch_trans"], range(0,101,10))
train_df_stat['binned_mean_merch_trans'] = pd.cut(train_df_stat['mean_merch_trans'], bins)
train_df_stat["year"] = train_df_stat["first_active_month"].dt.year
test_df_stat["year"] = test_df_stat["first_active_month"].dt.year
train_df_stat["month"] = train_df_stat["first_active_month"].dt.month
test_df_stat["month"] = test_df_stat["first_active_month"].dt.month
return train_df_stat, test_df_stat
if __name__ == "__main__":
    # read_data_z returns (train features, test features, target values)
    x_train, x_test, y_train = read_data_z(data_dir="/Users/xavier.qiu/Documents/Kaggle/EloMerchantKaggle/data")
#%%
``` |
{
"source": "JobQiu/kaggle_bowl18",
"score": 2
} |
#### File: JobQiu/kaggle_bowl18/calculate_stage1.py
```python
import csv
import numpy as np
from scipy import ndimage as ndi
from tqdm import tqdm
def masks2labels(masks, roll=False):
num_mask = masks.shape[2 if roll else 0]
rolled = np.rollaxis(masks, 2) if roll else masks
mask_list = list(rolled)
labels = np.zeros((masks.shape[0 if roll else 1], masks.shape[1 if roll else 2]), dtype=np.uint16)
for i in range(num_mask):
mask_list[i] = np.where(mask_list[i] > 0, 1, 0)
labels = np.where(mask_list[i] > 0, i + 1, labels)
return labels
def label_loss(label_true, label_pred):
true_objects = len(np.unique(label_true))
pred_objects = len(np.unique(label_pred))
intersection = np.histogram2d(label_true.flatten(), label_pred.flatten(), bins=(true_objects, pred_objects))[0]
area_true = np.histogram(label_true, bins=true_objects)[0]
area_pred = np.histogram(label_pred, bins=pred_objects)[0]
area_true = np.expand_dims(area_true, -1)
area_pred = np.expand_dims(area_pred, 0)
union = area_true + area_pred - intersection
intersection = intersection[1:, 1:]
union = union[1:, 1:]
union[union == 0] = 1e-9
iou = intersection / union
def precision_at(threshold, iou):
matches = iou > threshold
true_positives = np.sum(matches, axis=1) == 1 # Correct objects
false_positives = np.sum(matches, axis=0) == 0 # Missed objects
false_negatives = np.sum(matches, axis=1) == 0 # Extra objects
tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
return tp, fp, fn
prec = []
for t in np.arange(0.5, 1.0, 0.05):
tp, fp, fn = precision_at(t, iou)
p = tp / (tp + fp + fn)
prec.append(p)
return np.mean(prec)
def compute_map_nuclei(true_masks, pred_masks):
true_labels = masks2labels(true_masks)
pred_labels = masks2labels(pred_masks)
return label_loss(true_labels, pred_labels)
def get_stage1_masks(true_filename, pred_filename):
stage1_mask_list = {}
stage1_test_sizes = {}
stage1_pred_mask_list = {}
with open(true_filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
rleNumbers = [int(s) for s in row['EncodedPixels'].split(' ')]
rlePairs = np.array(rleNumbers).reshape(-1, 2)
if row['ImageId'] not in stage1_test_sizes:
stage1_test_sizes[row['ImageId']] = [int(row['Height']), int(row['Width'])]
height = stage1_test_sizes[row['ImageId']][0]
width = stage1_test_sizes[row['ImageId']][1]
mask = np.zeros(height * width, dtype=np.uint8)
for index, length in rlePairs:
index -= 1
mask[index:index + length] = 1
mask = mask.reshape(width, height)
mask = mask.T
if row['ImageId'] not in stage1_mask_list:
stage1_mask_list[row['ImageId']] = []
stage1_mask_list[row['ImageId']].append(mask)
with open(pred_filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
rleNumbers = [int(s) for s in row['EncodedPixels'].split(' ')]
rlePairs = np.array(rleNumbers).reshape(-1, 2)
height = stage1_test_sizes[row['ImageId']][0]
width = stage1_test_sizes[row['ImageId']][1]
mask = np.zeros(height * width, dtype=np.uint8)
for index, length in rlePairs:
index -= 1
mask[index:index + length] = 1
mask = mask.reshape(width, height)
mask = mask.T
if row['ImageId'] not in stage1_pred_mask_list:
stage1_pred_mask_list[row['ImageId']] = []
stage1_pred_mask_list[row['ImageId']].append(mask)
APs = []
for imageId in tqdm(stage1_mask_list):
true_masks = np.array(stage1_mask_list[imageId])
pred_masks = np.array(stage1_pred_mask_list[imageId])
APs.append(compute_map_nuclei(true_masks, pred_masks))
return np.mean(APs)
# Put the solution of stage1 here
true_filename = 'stage1_solution.csv'
# Put your solution here
pred_filename = 'submit_history/submit-04-11.csv'
print(get_stage1_masks(true_filename, pred_filename))
```
#### File: JobQiu/kaggle_bowl18/pre_process_flip_images.py
```python
import os
from tqdm import tqdm
from skimage.io import imread, imsave
import numpy as np
import datetime
problem_ids = list()
problem_ids.append('7b38c9173ebe69b4c6ba7e703c0c27f39305d9b2910f46405993d2ea7a963b80')
problem_ids.append('b1eb0123fe2d8c825694b193efb7b923d95effac9558ee4eaf3116374c2c94fe')
problem_ids.append('9bb6e39d5f4415bc7554842ee5d1280403a602f2ba56122b87f453a62d37c06e')
problem_ids.append('1f0008060150b5b93084ae2e4dabd160ab80a95ce8071a321b80ec4e33b58aca')
problem_ids.append('58c593bcb98386e7fd42a1d34e291db93477624b164e83ab2afa3caa90d1d921')
problem_ids.append('adc315bd40d699fd4e4effbcce81cd7162851007f485d754ad3b0472f73a86df')
problem_ids.append('12aeefb1b522b283819b12e4cfaf6b13c1264c0aadac3412b4edd2ace304cb40')
problem_ids.append('0a7d30b252359a10fd298b638b90cb9ada3acced4e0c0e5a3692013f432ee4e9')
def getNameFromTime():
now = datetime.datetime.now()
return str(now.minute) + str(now.second) + str(now.microsecond)
import warnings
warnings.filterwarnings("ignore")
IMG_CHANNELS = 3
os.system('rm -rf stage1_train_copy')
os.system('mkdir stage1_train_copy')
TRAIN_PATH = 'stage1_train/'
TEST_PATH = 'stage1_test/'
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
print('Getting and resizing train images and masks ... ')
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
if id_ in problem_ids:
continue
path = TRAIN_PATH + id_
image__ = imread(path + '/images/' + id_ + '.png')[:, :, :IMG_CHANNELS]
mask_imgs_flip = list()
mask_imgs = list()
id__ = 'i' + getNameFromTime()
temp_imgs = next(os.walk(path + '/masks/'))[2]
assert len(temp_imgs) > 0
for mask in temp_imgs:
mask_img = imread(path + '/masks/' + mask)
mask_imgs.append(mask_img)
mask_imgs_flip.append(np.fliplr(mask_img))
image__flip = np.fliplr(image__)
id__flip = 'i' + getNameFromTime()
dirPath = ''
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__))
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/images/'))
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/masks/'))
path___ = os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/images/' + id__ + '.png')
imsave(path___, image__)
for mask_ in mask_imgs:
mask_id = 'm' + getNameFromTime()
path__m = os.path.join(dirPath, 'stage1_train_copy/' + id__ + '/masks/' + mask_id + '.png')
imsave(path__m, mask_)
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__flip))
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__flip + '/images/'))
os.mkdir(os.path.join(dirPath, 'stage1_train_copy/' + id__flip + '/masks/'))
path___ = os.path.join(dirPath, 'stage1_train_copy/' + id__flip + '/images/' + id__flip + '.png')
imsave(path___, image__flip)
for mask_ in mask_imgs_flip:
mask_id = 'm' + getNameFromTime()
path__m = os.path.join(dirPath, 'stage1_train_copy/' + id__flip + '/masks/' + mask_id + '.png')
imsave(path__m, mask_)
```
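
The augmentation above relies on applying exactly the same flip to the image and to every one of its masks so that annotations stay aligned. A tiny standalone check of that invariant on toy arrays (not from the repository); the same pattern would extend to vertical flips with np.flipud:

```python
import numpy as np

image = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)  # toy H x W x C image
mask = np.array([[0, 1],
                 [1, 0]], dtype=np.uint8)                # toy binary mask

image_lr, mask_lr = np.fliplr(image), np.fliplr(mask)    # horizontal flip pair
# a pixel and its mask value move together, so the annotation stays aligned
assert (image_lr[:, 0] == image[:, 1]).all()
assert (mask_lr[:, 0] == mask[:, 1]).all()
```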
#### File: JobQiu/kaggle_bowl18/pre_test_rle_to_mask.py
```python
import numpy as np
import pandas as pd
import os
import datetime
from PIL import Image
def rle_decode(rle, shape):
"""Decodes an RLE encoded list of space separated
numbers and returns a binary mask."""
rle = list(map(int, rle.split()))
rle = np.array(rle, dtype=np.int32).reshape([-1, 2])
rle[:, 1] += rle[:, 0]
rle -= 1
mask = np.zeros([shape[0] * shape[1]], dtype=bool)
for s, e in rle:
assert 0 <= s < mask.shape[0]
assert 1 <= e <= mask.shape[0], "shape: {} s {} e {}".format(shape, s, e)
mask[s:e] = 1
# Reshape and transpose
mask = mask.reshape([shape[1], shape[0]]).T
return mask
stage1_solution = pd.read_csv('stage1_solution.csv')
def getNameFromTime():
now = datetime.datetime.now()
return str(now.minute) + str(now.second) + str(now.microsecond)
for index, row in stage1_solution.iterrows():
id_ = row['ImageId']
if os.path.exists('stage1_test/'+id_):
temp = rle_decode(rle = row['EncodedPixels'],shape=[row['Height'],row['Width']])
im = Image.fromarray(temp)
path_temp = 'stage1_test/'+id_+'/masks'
if not os.path.exists(path_temp):
os.mkdir(path_temp)
path_temp = 'stage1_test/'+id_+'/masks/'+getNameFromTime()+'.png'
im.save(path_temp)
#%%
``` |
{
"source": "JobQiu/Kaggle-NLP-Summary",
"score": 2
} |
#### File: Kaggle-NLP-Summary/common/evaluate.py
```python
from keras import backend as K
import numpy as np
from sklearn.metrics import f1_score
def tweak_threshold(pred, truth):
thresholds = []
scores = []
for thresh in np.arange(0.1, 0.501, 0.01):
thresh = np.round(thresh, 2)
thresholds.append(thresh)
score = f1_score(truth, (pred > thresh).astype(int))
scores.append(score)
return np.max(scores), thresholds[np.argmax(scores)]
def f1_keras(y_true, y_pred):
"""
:param y_true:
:param y_pred:
:return:
"""
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
```
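
A self-contained sketch of the threshold scan that tweak_threshold performs, on made-up labels and probabilities:

```python
import numpy as np
from sklearn.metrics import f1_score

truth = np.array([0, 0, 1, 1, 1, 0, 1, 0])
pred = np.array([0.1, 0.4, 0.8, 0.35, 0.9, 0.2, 0.6, 0.05])

# scan candidate thresholds and keep the one with the best F1
scores = {round(float(t), 2): f1_score(truth, (pred > t).astype(int))
          for t in np.arange(0.1, 0.501, 0.01)}
best_thresh = max(scores, key=scores.get)
print(best_thresh, scores[best_thresh])
```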
#### File: Kaggle-NLP-Summary/common/preprocess.py
```python
import re
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
def getWordIndices(X, len_voc, max_len):
t = Tokenizer(num_words=len_voc)
t.fit_on_texts(X)
X = t.texts_to_sequences(X)
X = pad_sequences(X, maxlen=max_len)
return X, t.word_index
def deal_with_special_characters(text):
for p in punct_mapping:
text = text.replace(p, punct_mapping[p])
for p in punct:
text = text.replace(p, f' {p} ')
for s in specials:
text = text.replace(s, specials[s])
return text
specials = {'\u200b': ' ', '…': ' ... ', '\ufeff': '', 'करना': '',
'है': ''}  # other special characters that are dealt with last
punct = "/-'?!.,#$%\'()*+-/:;<=>@[\\]^_`{|}~" + '""“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\×™√²—–&'
punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2",
"—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '“': '"', '”': '"', '“': '"', "£": "e",
'∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta',
'∅': '', '³': '3', 'π': 'pi', }
def deal_with_punct(x):
x = str(x)
for punct in "/-'":
x = x.replace(punct, ' ')
for punct in '?!.,"#$%\'()*+-/:;<=>@[\\]^_`{|}~' + '“”’':
x = x.replace(punct, '')
return x
def deal_with_numbers(x):
"""
Why is "##" in there? As a preprocessing step, all numbers with more than one
digit were replaced by runs of '#' of the same length, i.e. 15 becomes ## while
123 becomes ### and 15.80€ becomes ##.##€. Mimicking this preprocessing step
here further improves our embedding coverage.
:param x:
:return:
"""
x = re.sub('[0-9]{5,}', '#####', x)
x = re.sub('[0-9]{4}', '####', x)
x = re.sub('[0-9]{3}', '###', x)
x = re.sub('[0-9]{2}', '##', x)
return x
def deal_with_misspell(x):
"""
"""
pass
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
mispellings, mispellings_re = _get_mispell(misspell_mapping)
def replace(match):
return mispellings[match.group(0)]
return mispellings_re.sub(replace, x)
misspell_mapping = {'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling',
'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor',
'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize',
'youtu ': 'youtube ',
'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do',
'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many',
'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does',
'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating',
'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data',
'2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend',
'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp',
'demonitisation': 'demonetization',
'demonitization': 'demonetization', 'demonetisation': 'demonetization', 'pokémon': 'Pokemon',
'Pokémon': 'Pokemon',
'Quorans': 'Quora users', 'cryptocurrencies': 'cryptocurrency', 'Blockchain': 'blockchain',
'fiancé': 'fiance', 'wwwyoutubecom': 'youtube',
'Cryptocurrency': 'cryptocurrency', 'Quoras': 'Quora', 'Golang': 'golang', 'Whst': 'What',
'coinbase': 'Coinbase', 'tensorflow': 'TensorFlow'}
def deal_with_contraction(text):
specials = ["’", "‘", "´", "`"]
for s in specials:
text = text.replace(s, "'")
text = ' '.join([contraction_mapping[t] if t in contraction_mapping else t for t in text.split(" ")])
return text
contraction_mapping = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because",
"could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not",
"don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not",
"he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did",
"how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would",
"I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am",
"I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will",
"i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is",
"let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have",
"mightn't": "might not", "mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not",
"needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not",
"oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not",
"shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have",
"she'll": "she will", "she'll've": "she will have", "she's": "she is",
"should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have",
"so've": "so have", "so's": "so as", "this's": "this is", "that'd": "that would",
"that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is",
"they'd": "they would", "they'd've": "they would have", "they'll": "they will",
"they'll've": "they will have", "they're": "they are", "they've": "they have",
"to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have",
"we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have",
"weren't": "were not", "what'll": "what will", "what'll've": "what will have",
"what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is",
"when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have",
"who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not",
"won't've": "will not have", "would've": "would have", "wouldn't": "would not",
"wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would",
"y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have",
"you'd": "you would", "you'd've": "you would have", "you'll": "you will",
"you'll've": "you will have", "you're": "you are", "you've": "you have"}
```
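
A usage sketch of how these cleaners are presumably chained before tokenisation; the import path and the exact order are assumptions, not something shown in the repository:

```python
from common.preprocess import (deal_with_contraction, deal_with_special_characters,
                               deal_with_numbers, getWordIndices)

def clean(text):
    text = deal_with_contraction(text)         # "What's" -> "What is"
    text = deal_with_special_characters(text)  # map / space out punctuation
    text = deal_with_numbers(text)             # 2018 -> ####
    return text

questions = ["What's the best way to learn ML in 2018?"]
X, word_index = getWordIndices([clean(q) for q in questions],
                               len_voc=50000, max_len=70)
```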
#### File: Kaggle-NLP-Summary/common/util.py
```python
import requests
import json
import os
import copy
from collections import deque
def load_hyperparameters():
"""
"""
pass
def load_config():
"""
load config from Kaggle-NLP-Summary config.json
:return:
"""
json_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
with open(os.path.join(json_path, "config.json")) as f:
config = json.load(f)
print("Config - :")
print(config)
f.close()
return config
def send_msg(msg="...",
dingding_url="https://oapi.dingtalk.com/robot/send?access_token=774cd9150c43c35e43ec93bc6c91553a5c652417c10fd577bec117ed9f3e3182"
):
'''
Send a reminder message to a DingTalk chat via its webhook.
'''
headers = {"Content-Type": "application/json; charset=utf-8"}
post_data = {
"msgtype": "text",
"text": {
"content": msg
}
}
requests.post(dingding_url, headers=headers,
data=json.dumps(post_data))
def map_list_combination(params_list):
"""
params = {
"objective": ["regression"],
"metric": ["rmse"],
"num_leaves": [10,30,50],
"min_child_weight": [40,50,60],
"learning_rate": [0.01,0.03, 0.05, 0.06],
"bagging_fraction": [0.6,0.7,0.8],
"feature_fraction": [0.6,0.7,0.8],
"bagging_frequency": [4,5,6],
"bagging_seed": [2018],
"verbosity": [-1]
}
:param map_list:
:return:
for this example, it will return all the combinations
"""
res = deque([{}])
for key in params_list:
value_list = params_list[key]
l = len(res)
for i in range(l):
cur_dict = res.popleft()
for value in value_list:
new_cur_dict = copy.deepcopy(cur_dict)
new_cur_dict[key] = value
res.append(new_cur_dict)
return res
# %%
test = False
if test:
from collections import deque
params_list = {
"objective": ["regression"],
"metric": ["rmse"],
"num_leaves": [10, 30, 50],
"min_child_weight": [40, 50, 60],
"learning_rate": [0.01, 0.03, 0.05, 0.06],
"bagging_fraction": [0.6, 0.7, 0.8],
"feature_fraction": [0.6, 0.7, 0.8],
"bagging_frequency": [4, 5, 6],
"bagging_seed": [2018],
"verbosity": [-1]
}
res = deque([{}])
for key in params_list:
value_list = params_list[key]
l = len(res)
for i in range(l):
cur_dict = res.popleft()
for value in value_list:
new_cur_dict = copy.deepcopy(cur_dict)
new_cur_dict[key] = value
res.append(new_cur_dict)
print(res)
``` |
{
"source": "JobQiu/PrototypicalNetwork",
"score": 3
} |
#### File: PrototypicalNetwork/base/base_model.py
```python
import tensorflow as tf
class BaseModel:
def __init__(self, config):
self.config = config
# init the global step
self.init_global_step()
# init the epoch counter
self.init_cur_epoch()
# save function that saves the checkpoint in the path defined in the config file
def save(self, sess):
print("Saving model...")
self.saver.save(sess, self.config.checkpoint_dir, self.global_step_tensor)
print("Model saved")
# load latest checkpoint from the experiment path defined in the config file
def load(self, sess):
latest_checkpoint = tf.train.latest_checkpoint(self.config.checkpoint_dir)
if latest_checkpoint:
print("Loading model checkpoint {} ...\n".format(latest_checkpoint))
self.saver.restore(sess, latest_checkpoint)
print("Model loaded")
# just initialize a tensorflow variable to use it as epoch counter
def init_cur_epoch(self):
with tf.variable_scope('cur_epoch'):
self.cur_epoch_tensor = tf.Variable(0, trainable=False, name='cur_epoch')
self.increment_cur_epoch_tensor = tf.assign(self.cur_epoch_tensor, self.cur_epoch_tensor + 1)
# just initialize a tensorflow variable to use it as global step counter
def init_global_step(self):
# DON'T forget to add the global step tensor to the tensorflow trainer
with tf.variable_scope('global_step'):
self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoint.
self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
def build_model(self):
raise NotImplementedError
```
#### File: PrototypicalNetwork/models/example_model.py
```python
from base.base_model import BaseModel
import tensorflow as tf
from utils.tf_utils import euclidean_distance, euclidean_distance_with_weight
def conv_block(inputs, out_channels, name='conv'):
with tf.variable_scope(name):
conv = tf.layers.conv2d(inputs, out_channels, kernel_size=3, padding='SAME')
conv = tf.contrib.layers.batch_norm(conv, updates_collections=None, decay=0.99, scale=True, center=True)
conv = tf.nn.relu(conv)
conv = tf.contrib.layers.max_pool2d(conv, 2)
return conv
def encoder(x, h_dim, z_dim, reuse=False):
with tf.variable_scope('encoder', reuse=reuse):
net = conv_block(x, h_dim, name='conv_1')
net = conv_block(net, h_dim, name='conv_2')
net = conv_block(net, h_dim, name='conv_3')
net = conv_block(net, z_dim, name='conv_4')
net = tf.contrib.layers.flatten(net)
return net
def embedding2weights(x, num_class=20, num_support=5, embedding_size=1600):
if len(x.get_shape()) == 2:
x = tf.reshape(x, [num_class, num_support, -1])
with tf.variable_scope(name_or_scope="get_weight", reuse=tf.AUTO_REUSE):
x_max = tf.expand_dims(tf.reduce_max(x, 1), 1)
x_min = tf.expand_dims(tf.reduce_min(x, 1), 1)
x_sum = tf.expand_dims(tf.reduce_sum(x, 1), 1)
x_prod = tf.expand_dims(tf.reduce_prod(x, 1), 1)
x_mean, x_variance = tf.nn.moments(x, [1])
x_mean = tf.expand_dims(x_mean, 1)
x_variance = tf.expand_dims(x_variance, 1)
para_list = [x_max, x_min, x_mean, x_prod, x_sum, x_variance]
x_all = tf.concat(para_list, 1)
x_all = tf.transpose(x_all, perm=[0, 2, 1])
weight = tf.get_variable(shape=(len(para_list), 1), name='weight', dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
weight2 = tf.get_variable(shape=(1600, 6), name='weight2', dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
_W_t = tf.tile(tf.expand_dims(weight2, axis=0), [num_class, 1, 1])
# out = tf.matmul(x_all, _W_t)
out = tf.multiply(x_all, _W_t)
out = tf.reduce_sum(out, axis=2)
# out = tf.squeeze(out, axis=2)
out = tf.nn.softmax(out, axis=1)
out = tf.scalar_mul(1600, out)
# out = tf.multiply(out, embedding_size)
return out
def embedding2weight_rnn(x, num_class=20, num_support=5, embedding_size=1600):
if len(x.get_shape()) == 2:
x = tf.reshape(x, [num_class, num_support, -1])
with tf.variable_scope(name_or_scope="get_weight", reuse=tf.AUTO_REUSE):
x_max = tf.expand_dims(tf.reduce_max(x, 1), 1)
x_min = tf.expand_dims(tf.reduce_min(x, 1), 1)
x_sum = tf.expand_dims(tf.reduce_sum(x, 1), 1)
x_prod = tf.expand_dims(tf.reduce_prod(x, 1), 1)
x_mean, x_variance = tf.nn.moments(x, [1])
x_mean = tf.expand_dims(x_mean, 1)
x_variance = tf.expand_dims(x_variance, 1)
para_list = [x_max, x_min, x_mean, x_prod, x_sum, x_variance]
x_all = tf.concat(para_list, 1)
x_all = tf.transpose(x_all, perm=[0, 2, 1])
weight = tf.get_variable(shape=(len(para_list), 1), name='weight', dtype=tf.float32,
initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
_W_t = tf.tile(tf.expand_dims(weight, axis=0), [num_class, 1, 1])
out = tf.matmul(x_all, _W_t)
out = tf.squeeze(out, axis=2)
out = tf.nn.softmax(out, axis=1)
out = tf.scalar_mul(1600, out)
# out = tf.multiply(out, embedding_size)
return out
class PrototypicalNetwork(BaseModel):
def __init__(self, config, with_weight=True):
super(PrototypicalNetwork, self).__init__(config)
self.build_model(with_weight=with_weight)
self.init_saver()
def build_model(self, with_weight):
config = self.config
num_class = config.num_class_per_episode
num_support = config.num_sample_per_class
num_query = config.num_query_per_class
self.x = tf.placeholder(dtype=tf.float32,
shape=[num_class, num_support, config.image_height, config.image_width,
config.image_channel_size],
name='support_set')
self.q = tf.placeholder(dtype=tf.float32,
shape=[num_class, num_query, config.image_height, config.image_width,
config.image_channel_size],
name='query')
self.y = tf.placeholder(tf.int64, [None, None], name='label_of_query')
y_one_hot = tf.one_hot(self.y, depth=num_class)
self.emb_x = encoder(tf.reshape(self.x, [num_class * num_support, config.image_height, config.image_width,
config.image_channel_size]), config.hidden_channel_size,
config.output_channel_size)
emb_dim = tf.shape(self.emb_x)[-1]
weights = embedding2weights(self.emb_x, num_class, num_support,
embedding_size=emb_dim) # embedding_size=config.embedding_size)
self.weights = weights
self.prototype = tf.reduce_mean(tf.reshape(self.emb_x, [num_class, num_support, emb_dim]), axis=1,
name='prototype')
self.emb_q = encoder(tf.reshape(self.q, [num_class * num_query, config.image_height, config.image_width,
config.image_channel_size]),
config.hidden_channel_size,
config.output_channel_size,
reuse=True)
# dists = euclidean_distance(self.emb_q, self.prototype)
if with_weight:
dists = euclidean_distance_with_weight(self.emb_q, self.prototype, weights)
else:
dists = euclidean_distance(self.emb_q, self.prototype)
log_p_y = tf.reshape(tf.nn.log_softmax(-dists), [num_class, num_query, -1])
# cross entropy loss
self.loss = -tf.reduce_mean(
tf.reshape(
tf.reduce_sum(tf.multiply(y_one_hot, log_p_y), axis=-1),
[-1]
),
name='loss'
)
self.acc = tf.reduce_mean(tf.cast(x=tf.equal(tf.argmax(log_p_y, axis=-1), self.y),
dtype=tf.float32
), name='accuracy'
)
self.train_op = tf.train.AdamOptimizer().minimize(self.loss, global_step=self.global_step_tensor)
class ExampleModel(BaseModel):
def __init__(self, config):
super(ExampleModel, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
self.is_training = tf.placeholder(tf.bool)
self.x = tf.placeholder(tf.float32, shape=[None] + self.config.state_size)
self.y = tf.placeholder(tf.float32, shape=[None, 10])
# network architecture
d1 = tf.layers.dense(self.x, 512, activation=tf.nn.relu, name="dense1")
d2 = tf.layers.dense(d1, 10, name="dense2")
with tf.name_scope("loss"):
self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=d2))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_step = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.cross_entropy,
global_step=self.global_step_tensor)
correct_prediction = tf.equal(tf.argmax(d2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoint.
self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
class PrototypicalNetwork_v2(BaseModel):
def __init__(self, config):
super(PrototypicalNetwork_v2, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
config = self.config
self.support_set = tf.placeholder(dtype=tf.float32,
shape=[None, None, config.image_height, config.image_width,
config.image_channel_size],
name='support_set')
self.query = tf.placeholder(dtype=tf.float32,
shape=[None, None, config.image_height, config.image_width,
config.image_channel_size],
name='query')
self.query_label = tf.placeholder(dtype=tf.int32, shape=[None, None], name='label')
query_label_one_hot = tf.one_hot(self.query_label, depth=config.num_class_per_episode)
pass
``` |
{
"source": "jobquiroz/mlflow",
"score": 3
} |
#### File: 12_Training_val_inference_01_Inference_pipeline/random_forest/run.py
```python
import argparse
import logging
import json
import pandas as pd
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder, StandardScaler, FunctionTransformer
import matplotlib.pyplot as plt
import wandb
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
def go(args):
run = wandb.init(project="exercise_10", job_type="train")
logger.info("Downloading and reading train artifact")
train_data_path = run.use_artifact(args.train_data).file()
df = pd.read_csv(train_data_path, low_memory=False)
# Extract the target from the features
logger.info("Extracting target from dataframe")
X = df.copy()
y = X.pop("genre")
logger.info("Splitting train/val")
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.3, stratify=y, random_state=42
)
logger.info("Setting up pipeline")
pipe = get_inference_pipeline(args)
logger.info("Fitting")
pipe.fit(X_train, y_train)
logger.info("Scoring")
score = roc_auc_score(
y_val, pipe.predict_proba(X_val), average="macro", multi_class="ovo"
)
run.summary["AUC"] = score
# We collect the feature importance for all non-nlp features first
feat_names = np.array(
pipe["preprocessor"].transformers[0][-1]
+ pipe["preprocessor"].transformers[1][-1]
)
feat_imp = pipe["classifier"].feature_importances_[: len(feat_names)]
# For the NLP feature we sum across all the TF-IDF dimensions into a global
# NLP importance
nlp_importance = sum(pipe["classifier"].feature_importances_[len(feat_names) :])
feat_imp = np.append(feat_imp, nlp_importance)
feat_names = np.append(feat_names, "title + song_name")
fig_feat_imp, sub_feat_imp = plt.subplots(figsize=(10, 10))
idx = np.argsort(feat_imp)[::-1]
sub_feat_imp.bar(range(feat_imp.shape[0]), feat_imp[idx], color="r", align="center")
_ = sub_feat_imp.set_xticks(range(feat_imp.shape[0]))
_ = sub_feat_imp.set_xticklabels(feat_names[idx], rotation=90)
fig_feat_imp.tight_layout()
fig_cm, sub_cm = plt.subplots(figsize=(10, 10))
plot_confusion_matrix(
pipe,
X_val,
y_val,
ax=sub_cm,
normalize="true",
values_format=".1f",
xticks_rotation=90,
)
fig_cm.tight_layout()
run.log(
{
"feature_importance": wandb.Image(fig_feat_imp),
"confusion_matrix": wandb.Image(fig_cm),
}
)
def get_inference_pipeline(args):
# Our pipeline will contain a pre-processing step and a Random Forest.
# The pre-processing step will impute missing values, encode the labels,
# normalize numerical features and compute a TF-IDF for the textual
# feature
# We need 3 separate preprocessing "tracks":
# - one for categorical features
# - one for numerical features
# - one for textual ("nlp") features
# Categorical preprocessing pipeline.
# NOTE: we sort the list so that the order of the columns will be
# defined, and not dependent on the order in the input dataset
categorical_features = sorted(["time_signature", "key"])
categorical_transformer = make_pipeline(
SimpleImputer(strategy="constant", fill_value=0), OrdinalEncoder()
)
# Numerical preprocessing pipeline
numeric_features = sorted([
"danceability",
"energy",
"loudness",
"speechiness",
"acousticness",
"instrumentalness",
"liveness",
"valence",
"tempo",
"duration_ms",
])
############# YOUR CODE HERE
numeric_transformer = make_pipeline(
SimpleImputer(strategy='median'), StandardScaler()
) # USE make_pipeline to create a pipeline containing a SimpleImputer using strategy=median
# and a StandardScaler (you can use the default options for the latter)
# Textual ("nlp") preprocessing pipeline
nlp_features = ["text_feature"]
# This trick is needed because SimpleImputer wants a 2d input, but
# TfidfVectorizer wants a 1d input. So we reshape in between the two steps
reshape_to_1d = FunctionTransformer(np.reshape, kw_args={"newshape": -1})
############# YOUR CODE HERE
nlp_transformer = make_pipeline(
SimpleImputer(strategy = 'constant', fill_value = ""),
reshape_to_1d,
TfidfVectorizer(binary = True)
)
# USE make_pipeline to create a pipeline containing a SimpleImputer with strategy=constant and
# fill_value="" (the empty string), followed by our custom reshape_to_1d instance, and finally
# insert a TfidfVectorizer with the options binary=True
# Put the 3 tracks together into one pipeline using the ColumnTransformer
# This also drops the columns that we are not explicitly transforming
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features), # COMPLETE HERE using the categorical transformer and the categorical_features,
("nlp1", nlp_transformer, nlp_features),
],
remainder="drop", # This drops the columns that we do not transform (i.e., we don't use)
)
# Get the configuration for the model
with open(args.model_config) as fp:
model_config = json.load(fp)
# Add it to the W&B configuration so the values for the hyperparams
# are tracked
wandb.config.update(model_config)
############# YOUR CODE HERE
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
pipe = Pipeline([ ('preprocessor', preprocessor),
('classifier', RandomForestClassifier(**model_config))])
# CREATE a Pipeline instances with 2 steps: one step called "preprocessor" using the
# preprocessor instance, and another one called "classifier" using RandomForestClassifier(**model_config)
# (i.e., a Random Forest with the configuration we have received as input)
# NOTE: here you should create the Pipeline object directly, and not make_pipeline
# HINT: Pipeline(steps=[("preprocessor", instance1), ("classifier", LogisticRegression)]) creates a
# Pipeline with two steps called "preprocessor" and "classifier" using the sklearn instances instance1
# as preprocessor and a LogisticRegression as classifier
return pipe
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a Random Forest",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--train_data",
type=str,
help="Fully-qualified name for the training data artifact",
required=True,
)
parser.add_argument(
"--model_config",
type=str,
help="Path to a JSON file containing the configuration for the random forest",
required=True,
)
args = parser.parse_args()
go(args)
```
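
A condensed, self-contained toy version of the three-track pipeline assembled in get_inference_pipeline above (made-up dataframe and column names, no W&B plumbing), useful for checking the ColumnTransformer wiring in isolation:

```python
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import FunctionTransformer, OrdinalEncoder, StandardScaler

df = pd.DataFrame({
    "key": [0, 5, 7, np.nan],                                            # categorical track
    "tempo": [120.0, 95.5, np.nan, 140.0],                               # numerical track
    "text_feature": ["love song", "club banger", np.nan, "sad ballad"],  # nlp track
    "genre": ["pop", "dance", "dance", "pop"],
})
y = df.pop("genre")

reshape_to_1d = FunctionTransformer(np.reshape, kw_args={"newshape": -1})
preprocessor = ColumnTransformer(
    transformers=[
        ("num", make_pipeline(SimpleImputer(strategy="median"), StandardScaler()), ["tempo"]),
        ("cat", make_pipeline(SimpleImputer(strategy="constant", fill_value=0), OrdinalEncoder()), ["key"]),
        ("nlp1", make_pipeline(SimpleImputer(strategy="constant", fill_value=""),
                               reshape_to_1d, TfidfVectorizer(binary=True)), ["text_feature"]),
    ],
    remainder="drop",
)
pipe = Pipeline([("preprocessor", preprocessor),
                 ("classifier", RandomForestClassifier(n_estimators=10, random_state=0))])
pipe.fit(df, y)
print(pipe.predict(df))
```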
#### File: mlflow/13_Training_val_inference_02_Validate_and_choose/main.py
```python
import json
import mlflow
import os
import hydra
from omegaconf import DictConfig, OmegaConf
# This automatically reads in the configuration
@hydra.main(config_name='config')
def go(config: DictConfig):
# Setup the wandb experiment. All runs will be grouped under this name
os.environ["WANDB_PROJECT"] = config["main"]["project_name"]
os.environ["WANDB_RUN_GROUP"] = config["main"]["experiment_name"]
# You can get the path at the root of the MLflow project with this:
root_path = hydra.utils.get_original_cwd()
# Serialize the random forest configuration
model_config = os.path.abspath("random_forest_config.yml")
with open(model_config, "w+") as fp:
fp.write(OmegaConf.to_yaml(config["random_forest_pipeline"]))
_ = mlflow.run(
os.path.join(root_path, "random_forest"),
"main",
parameters={
"train_data": config["data"]["train_data"],
"model_config": model_config
},
)
if __name__ == "__main__":
go()
```
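
A small standalone sketch of the hand-off performed above: the selected sub-config is serialised with OmegaConf.to_yaml and read back by the downstream step with a plain YAML loader. The keys below are made up for illustration:

```python
import yaml
from omegaconf import OmegaConf

config = OmegaConf.create({
    "random_forest_pipeline": {
        "random_forest": {"n_estimators": 100, "max_depth": 13},
        "tfidf": {"max_features": 10000},
    }
})

with open("random_forest_config.yml", "w+") as fp:
    fp.write(OmegaConf.to_yaml(config["random_forest_pipeline"]))

with open("random_forest_config.yml") as fp:
    print(yaml.safe_load(fp))  # what the downstream run.py will see
```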
#### File: 14_Training_val_inferenfe_03_Export_model/random_forest/run.py
```python
import argparse
import logging
import os
from wandb.sdk.data_types import _DATA_FRAMES_SUBDIR
import yaml
import tempfile
import mlflow
import pandas as pd
import numpy as np
from mlflow.models import infer_signature
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder, StandardScaler, FunctionTransformer
import matplotlib.pyplot as plt
import wandb
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.impute import SimpleImputer
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
def go(args):
run = wandb.init(job_type="train")
logger.info("Downloading and reading train artifact")
train_data_path = run.use_artifact(args.train_data).file()
df = pd.read_csv(train_data_path, low_memory=False)
# Extract the target from the features
logger.info("Extracting target from dataframe")
X = df.copy()
y = X.pop("genre")
logger.info("Splitting train/val")
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.3, stratify=y, random_state=42
)
logger.info("Setting up pipeline")
pipe = get_training_inference_pipeline(args)
logger.info("Fitting")
pipe.fit(X_train, y_train)
# Evaluate
pred = pipe.predict(X_val)
pred_proba = pipe.predict_proba(X_val)
logger.info("Scoring")
score = roc_auc_score(y_val, pred_proba, average="macro", multi_class="ovo")
run.summary["AUC"] = score
# Export if required
if args.export_artifact != "null":
export_model(run, pipe, X_val, pred, args.export_artifact)
# Some useful plots
fig_feat_imp = plot_feature_importance(pipe)
fig_cm, sub_cm = plt.subplots(figsize=(10, 10))
plot_confusion_matrix(
pipe,
X_val,
y_val,
ax=sub_cm,
normalize="true",
values_format=".1f",
xticks_rotation=90,
)
fig_cm.tight_layout()
run.log(
{
"feature_importance": wandb.Image(fig_feat_imp),
"confusion_matrix": wandb.Image(fig_cm),
}
)
def export_model(run, pipe, X_val, val_pred, export_artifact):
# Infer the signature of the model
signature = infer_signature(X_val, val_pred)
with tempfile.TemporaryDirectory() as temp_dir:
export_path = os.path.join(temp_dir, "model_export")
#### YOUR CODE HERE
# Save the pipeline in the export_path directory using mlflow.sklearn.save_model
# function. Provide the signature computed above ("signature") as well as a few
# examples (input_example=X_val.iloc[:2]), and use the CLOUDPICKLE serialization
# format (mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE)
mlflow.sklearn.save_model(
pipe,
export_path,
signature = signature,
input_example = X_val.iloc[:2],
serialization_format = mlflow.sklearn.SERIALIZATION_FORMAT_CLOUDPICKLE
)
# Then upload the temp_dir directory as an artifact:
# 1. create a wandb.Artifact instance called "artifact"
# 2. add the temp directory using .add_dir
# 3. log the artifact to the run
artifact = wandb.Artifact(
export_artifact,
type = 'model_export',
description='Random Forest pipeline export',
)
artifact.add_dir(export_path)
run.log_artifact(artifact)
# Make sure the artifact is uploaded before the temp dir
# gets deleted
artifact.wait()
def plot_feature_importance(pipe):
# We collect the feature importance for all non-nlp features first
feat_names = np.array(
pipe["preprocessor"].transformers[0][-1]
+ pipe["preprocessor"].transformers[1][-1]
)
feat_imp = pipe["classifier"].feature_importances_[: len(feat_names)]
# For the NLP feature we sum across all the TF-IDF dimensions into a global
# NLP importance
nlp_importance = sum(pipe["classifier"].feature_importances_[len(feat_names) :])
feat_imp = np.append(feat_imp, nlp_importance)
feat_names = np.append(feat_names, "title + song_name")
fig_feat_imp, sub_feat_imp = plt.subplots(figsize=(10, 10))
idx = np.argsort(feat_imp)[::-1]
sub_feat_imp.bar(range(feat_imp.shape[0]), feat_imp[idx], color="r", align="center")
_ = sub_feat_imp.set_xticks(range(feat_imp.shape[0]))
_ = sub_feat_imp.set_xticklabels(feat_names[idx], rotation=90)
fig_feat_imp.tight_layout()
return fig_feat_imp
def get_training_inference_pipeline(args):
# Get the configuration for the pipeline
with open(args.model_config) as fp:
model_config = yaml.safe_load(fp)
# Add it to the W&B configuration so the values for the hyperparams
# are tracked
wandb.config.update(model_config)
# We need 3 separate preprocessing "tracks":
# - one for categorical features
# - one for numerical features
# - one for textual ("nlp") features
# Categorical preprocessing pipeline
categorical_features = sorted(model_config["features"]["categorical"])
categorical_transformer = make_pipeline(
SimpleImputer(strategy="constant", fill_value=0), OrdinalEncoder()
)
# Numerical preprocessing pipeline
numeric_features = sorted(model_config["features"]["numerical"])
numeric_transformer = make_pipeline(
SimpleImputer(strategy="median"), StandardScaler()
)
# Textual ("nlp") preprocessing pipeline
nlp_features = sorted(model_config["features"]["nlp"])
# This trick is needed because SimpleImputer wants a 2d input, but
# TfidfVectorizer wants a 1d input. So we reshape in between the two steps
reshape_to_1d = FunctionTransformer(np.reshape, kw_args={"newshape": -1})
nlp_transformer = make_pipeline(
SimpleImputer(strategy="constant", fill_value=""),
reshape_to_1d,
TfidfVectorizer(
binary=True, max_features=model_config["tfidf"]["max_features"]
),
)
# Put the 3 tracks together into one pipeline using the ColumnTransformer
# This also drops the columns that we are not explicitly transforming
preprocessor = ColumnTransformer(
transformers=[
("num", numeric_transformer, numeric_features),
("cat", categorical_transformer, categorical_features),
("nlp1", nlp_transformer, nlp_features),
],
remainder="drop", # This drops the columns that we do not transform
)
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
pipe = Pipeline(
steps=[
("preprocessor", preprocessor),
("classifier", RandomForestClassifier(**model_config["random_forest"])),
]
)
return pipe
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train a Random Forest",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--train_data",
type=str,
help="Fully-qualified name for the training data artifact",
required=True,
)
parser.add_argument(
"--model_config",
type=str,
help="Path to a YAML file containing the configuration for the random forest",
required=True,
)
parser.add_argument(
"--export_artifact",
type=str,
help="Name of the artifact for the exported model. Use 'null' for no export.",
required=False,
default="null",
)
args = parser.parse_args()
go(args)
``` |
{
"source": "JOBR0/PerceiverIO_Pytorch",
"score": 3
} |
#### File: JOBR0/PerceiverIO_Pytorch/example_opt_flow.py
```python
import os
import torch
import matplotlib.pyplot as plt
from utils.flow_utils import flow_to_image
from utils.utils import load_image
from perceiver_io.flow_perceiver import FlowPerceiver
def flow_example():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)
perceiver = FlowPerceiver(img_size=(368, 496))
perceiver.eval()
perceiver.to(device)
ckpt_file = "./pytorch_checkpoints/optical_flow_checkpoint.pth"
# check if file exists
if not os.path.isfile(ckpt_file):
raise ValueError("Please download the model checkpoint and place it in /pytorch_checkpoints")
checkpoint = torch.load(ckpt_file, map_location=device)
perceiver.load_state_dict(checkpoint["model_state_dict"])
img1 = load_image("./sample_data/frame_0016.png", device)
img2 = load_image("./sample_data/frame_0017.png", device)
# Normalize images
img1_norm = 2 * (img1 / 255.0) - 1.0
img2_norm = 2 * (img2 / 255.0) - 1.0
img1_norm = img1_norm.to(device)
img2_norm = img2_norm.to(device)
# Predict Flow
with torch.inference_mode():
flow = perceiver(img1_norm, img2_norm, test_mode=True)
# Show prediction
figure = plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(img1[0].permute(1, 2, 0).cpu().numpy() / 255)
plt.subplot(2, 2, 2)
plt.imshow(img2[0].permute(1, 2, 0).cpu().numpy() / 255)
plt.subplot(2, 2, 3)
plt.imshow(flow_to_image(flow[0].permute(1, 2, 0).cpu().numpy()))
plt.show()
if __name__ == "__main__":
flow_example()
```
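
A quick standalone check of the normalisation convention used above (shapes are assumed): 8-bit images in [0, 255] are mapped to float tensors in [-1, 1] before they enter the network:

```python
import torch

img = torch.randint(0, 256, (1, 3, 368, 496)).float()  # toy image tensor in [0, 255]
img_norm = 2 * (img / 255.0) - 1.0
print(img_norm.min().item(), img_norm.max().item())     # roughly -1.0 and 1.0
```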
#### File: PerceiverIO_Pytorch/perceiver_io/flow_perceiver.py
```python
import itertools
from typing import Sequence
import torch.nn as nn
import torch
from perceiver_io.io_processors.preprocessors import ImagePreprocessor
from perceiver_io.io_processors.processor_utils import patches_for_flow
from perceiver_io.output_queries import FlowQuery
from perceiver_io.perceiver import PerceiverIO
from timm.models.layers import to_2tuple
import torch.nn.functional as F
from torch.cuda.amp import autocast
from perceiver_io.position_encoding import PosEncodingType
from perceiver_io.io_processors.postprocessors import FlowPostprocessor
class FlowPerceiver(nn.Module):
"""
FlowPerceiver: Perceiver for optical flow
Args:
img_size (Sequence[int]): Size of training images (height x width). Default: (368, 496)
flow_scale_factor (float): Factor by which the predicted flow is multiplied (see
https://github.com/deepmind/deepmind-research/issues/266). Default: 0.2
num_latents (int): Number of latent variables. Default: 2048
n_latent_channels (int): Number of latent channels. Default: 512
num_self_attends_per_block (int): Number of self attention layers. Default: 24
num_blocks (int): Number of blocks. All blocks share weights. Default: 1
mixed_precision (bool): Whether to run the perceiver in mixed precision. Default: False
"""
def __init__(
self,
img_size: Sequence[int] = (368, 496),
flow_scale_factor: float = 20 / 100,
num_latents: int = 2048,
num_latent_channels: int = 512,
num_self_attends_per_block: int = 24,
num_blocks: int = 1,
mixed_precision: bool = False):
super().__init__()
self._flow_scale_factor = flow_scale_factor
self.mixed_precision = mixed_precision
channels = 3
patch_size = 3
preprocessor_channels = 64
input_preprocessor = ImagePreprocessor(
img_size=img_size,
input_channels=channels * patch_size ** 2,
position_encoding_type=PosEncodingType.FOURIER,
fourier_position_encoding_kwargs=dict(
num_bands=64,
max_resolution=img_size,
sine_only=False,
concat_pos=True,
),
n_extra_pos_mlp=0,
prep_type="patches",
spatial_downsample=1,
conv_after_patching=True,
temporal_downsample=2,
num_channels=preprocessor_channels)
perceiver_encoder_kwargs = dict(
num_self_attend_heads=16,
)
perceiver_decoder_kwargs = dict(
output_w_init="zeros",
)
output_query = FlowQuery(
preprocessed_input_channels=input_preprocessor.n_output_channels(),
output_img_size=img_size,
output_num_channels=2,
)
postprocessor = FlowPostprocessor(
img_size=img_size,
flow_scale_factor=flow_scale_factor
)
self.perceiver = PerceiverIO(
final_project_out_channels=2,
num_blocks=num_blocks,
num_self_attends_per_block=num_self_attends_per_block,
num_latents=num_latents,
num_latent_channels=num_latent_channels,
perceiver_encoder_kwargs=perceiver_encoder_kwargs,
perceiver_decoder_kwargs=perceiver_decoder_kwargs,
output_queries=output_query,
input_preprocessors=input_preprocessor,
output_postprocessors=postprocessor,)
self.H, self.W = to_2tuple(img_size)
def compute_grid_indices(self, image_shape: tuple, min_overlap: int):
"""
Compute top-left corner coordinates for patches
Args:
image_shape (tuple): Height and width of the input image
min_overlap (int): Minimum number of pixels that two patches overlap
"""
if min_overlap >= self.H or min_overlap >= self.W:
raise ValueError(
f"Overlap should be less than size of patch (got {min_overlap}"
f"for patch size {(self.H, self.W)}).")
ys = list(range(0, image_shape[0], self.H - min_overlap))
xs = list(range(0, image_shape[1], self.W - min_overlap))
# Make sure the final patch is flush with the image boundary
ys[-1] = image_shape[0] - self.H
xs[-1] = image_shape[1] - self.W
# Avoid predicting same patch multiple times
if image_shape[0] == self.H:
ys = [0]
if image_shape[1] == self.W:
xs = [0]
return itertools.product(ys, xs)
def _predict_patch(self, patch):
"""Predict flow for one image patch as big as training images"""
with autocast(enabled=self.mixed_precision):
# Extract overlapping 3x3 patches
patch = patches_for_flow(patch).movedim(-1, -3)
output = self.perceiver(patch)
return output
def forward(self, image1: torch.Tensor, image2: torch.Tensor, test_mode: bool = False, min_overlap: int = 20):
"""
Computes forward pass for flow perceiver
Args:
image1 (torch.Tensor): source images (N, C, H, W).
image2 (torch.Tensor): target images (N, C, H, W).
test_mode (bool): If in test mode. Default: False
min_overlap (int): Minimum overlap of patches if images are bigger than training size. Default: 20
"""
height = image1.shape[2]
width = image1.shape[3]
image1 = image1.contiguous()
image2 = image2.contiguous()
# Stack in time dimension
inputs = torch.stack([image1, image2], axis=1)
if height < self.H:
raise ValueError(
f"Height of image (shape: {image1.shape}) must be at least {self.H:}."
"Please pad or resize your image to the minimum dimension."
)
if width < self.W:
raise ValueError(
f"Width of image (shape: {image1.shape}) must be at least {self.W}."
"Please pad or resize your image to the minimum dimension."
)
if test_mode:
# in test_mode, image size can be arbitrary
# the flow is predicted for patches of training size and then stitched together
flows = 0
flow_count = 0
grid_indices = self.compute_grid_indices((height, width), min_overlap)
for y, x in grid_indices:
inp_piece = inputs[..., y: y + self.H, x: x + self.W]
flow_piece = self._predict_patch(inp_piece)
# weights should give more weight to flow from center of patches
weights_y, weights_x = torch.meshgrid(torch.arange(self.H), torch.arange(self.W), indexing="ij")
weights_x = torch.minimum(weights_x + 1, self.W - weights_x)
weights_y = torch.minimum(weights_y + 1, self.H - weights_y)
weights = torch.minimum(weights_x, weights_y)[None, None, :, :]
weights = weights / weights.max()
weights = weights.to(flow_piece.device)
padding = (x, width - x - self.W, y, height - y - self.H)
flows = flows + F.pad(flow_piece * weights, padding)
flow_count = flow_count + F.pad(weights, padding)
flows = flows / flow_count
output = flows
else:
assert height == self.H and width == self.W, \
f"In training mode images must have size equal to specified img_size {(self.H, self.W)}"
output = self._predict_patch(inputs)
return output
```
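
A standalone sketch of the centre-weighting used in test mode above: each training-sized patch contributes most near its centre, and overlapping contributions are renormalised by the accumulated weight map. The patch size below is a toy value, not from the repository:

```python
import torch

H, W = 4, 6  # toy patch size
wy, wx = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
wx = torch.minimum(wx + 1, W - wx)
wy = torch.minimum(wy + 1, H - wy)
weights = torch.minimum(wx, wy).float()
weights = weights / weights.max()
print(weights)  # largest values in the centre of the patch
```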
#### File: perceiver_io/io_processors/processor_utils.py
```python
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple
import einops
from timm.models.layers import trunc_normal_
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import conv_output_shape, same_padding
ModalitySizeT = Mapping[str, int]
PreprocessorOutputT = Tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
PreprocessorT = Callable[..., PreprocessorOutputT]
PostprocessorT = Callable[..., Any]
def space_to_depth(
frames: torch.Tensor,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> torch.Tensor:
"""Reduces spatial and/or temporal dimensions by stacking features in the channel dimension."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, "b (h dh) (w dw) c -> b h w (dh dw c)",
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, "b (t dt) (h dh) (w dw) c -> b t h w (dt dh dw c)",
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
"Frames should be of rank 4 (batch, height, width, channels)"
" or rank 5 (batch, time, height, width, channels)")
def reverse_space_to_depth(
frames: torch.Tensor,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> torch.Tensor:
"""Reverse space to depth transform."""
if len(frames.shape) == 4:
return einops.rearrange(
frames, "b h w (dh dw c) -> b (h dh) (w dw) c",
dh=spatial_block_size, dw=spatial_block_size)
elif len(frames.shape) == 5:
return einops.rearrange(
frames, "b t h w (dt dh dw c) -> b (t dt) (h dh) (w dw) c",
dt=temporal_block_size, dh=spatial_block_size, dw=spatial_block_size)
else:
raise ValueError(
"Frames should be of rank 4 (batch, height, width, channels)"
" or rank 5 (batch, time, height, width, channels)")
def extract_patches(images: torch.Tensor,
size: Sequence[int],
stride: Sequence[int] = 1,
dilation: Sequence[int] = 1,
padding: str = "VALID") -> torch.Tensor:
"""Extract patches from images.
The function extracts patches of shape `size` from the input images in the same
manner as a convolution with a kernel of shape `size`, stride equal to `stride`,
and the given padding scheme.
The patches are stacked in the channel dimension.
Args:
images (torch.Tensor): input batch of images of shape [B, C, H, W].
size (Sequence[int]): size of extracted patches. Must be [patch_height, patch_width].
stride (Sequence[int]): strides, must be [stride_rows, stride_cols]. Default: 1
dilation (Sequence[int]): as in dilated convolutions, must be [dilation_rows, dilation_cols]. Default: 1
padding (str): padding algorithm to use. Default: VALID
Returns:
Tensor of shape [B, patch_rows, patch_cols, size_rows * size_cols * C]
"""
if padding != "VALID":
raise ValueError(f"Only valid padding is supported. Got {padding}")
if images.ndim != 4:
raise ValueError(
f"Rank of images must be 4 (got tensor of shape {images.shape})")
n, c, h, w = images.shape
ph, pw = size
pad = 0
out_h, out_w = conv_output_shape((h, w), size, stride, pad, dilation)
patches = F.unfold(images, size, dilation=dilation, padding=0, stride=stride)
patches = einops.rearrange(patches, "n (c ph pw) (out_h out_w) -> n out_h out_w (ph pw c)",
c=c, ph=ph, pw=pw, out_h=out_h, out_w=out_w)
return patches
def patches_for_flow(inputs: torch.Tensor) -> torch.Tensor:
"""Extract 3x3x2 image patches for flow inputs.
Args:
inputs (torch.Tensor): image inputs (N, 2, C, H, W) """
batch_size = inputs.shape[0]
inputs = einops.rearrange(inputs, "N T C H W -> (N T) C H W")
padded_inputs = F.pad(inputs, [1, 1, 1, 1], mode="constant")
outputs = extract_patches(
padded_inputs,
size=[3, 3],
stride=1,
dilation=1,
padding="VALID")
outputs = einops.rearrange(outputs, "(N T) H W C-> N T H W C", N=batch_size)
return outputs
# ------------------------------------------------------------
# ------------------- Up/down-sampling ---------------------
# ------------------------------------------------------------
class Conv2DDownsample(nn.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
in_channels: int = 3,
num_channels: int = 64,
use_batchnorm: bool = True
):
"""Constructs a Conv2DDownsample model.
Args:
num_layers (int): The number of conv->max_pool layers. Default: 1
in_channels (int): The number of input channels. Default: 3
num_channels (int): The number of conv output channels. Default: 64
use_batchnorm (bool): Whether to use batchnorm. Default: True
"""
super().__init__()
self._num_layers = num_layers
self.norms = None
if use_batchnorm:
self.norms = nn.ModuleList()
self.convs = nn.ModuleList()
for _ in range(self._num_layers):
conv = nn.Conv2d(in_channels=in_channels,
out_channels=num_channels,
kernel_size=7,
stride=2,
bias=False)
trunc_normal_(conv.weight, mean=0.0, std=0.01)
self.convs.append(conv)
in_channels = num_channels
if use_batchnorm:
batchnorm = nn.BatchNorm2d(num_features=num_channels)
self.norms.append(batchnorm)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
out = inputs
for l, conv in enumerate(self.convs):
pad = same_padding(out.shape[1:], conv.kernel_size, conv.stride, dims=2)
out = F.pad(out, pad, mode="constant", value=0.0)
out = conv(out)
if self.norms is not None:
out = self.norms[l](out)
out = F.relu(out)
pad = same_padding(out.shape[1:], 3, 2, dims=2)
out = F.pad(out, pad, mode="constant", value=0.0)
out = F.max_pool2d(out, kernel_size=3, stride=2)
return out
# class Conv2DUpsample(nn.Module):
# """Upsamples 4x using 2 2D transposed convolutions."""
#
# def __init__(
# self,
# n_outputs: int,
# in_channels: int = 64,
# ):
# """Constructs a Conv2DUpsample model.
# Args:
# n_outputs: The number of output channels of the module.
# name: Name of the module.
# """
# super().__init__()
#
# self.transp_conv1 = nn.ConvTranspose2d(in_channels=in_channels,
# out_channels=n_outputs * 2,
# kernel_size=4,
# stride=2,
# padding=0,
# output_padding=0,
# bias=True)
#
# self.transp_conv1 = hk.Conv2DTranspose(
# output_channels=n_outputs * 2,
# kernel_shape=4,
# stride=2,
# with_bias=True,
# padding="SAME",
# name="transp_conv_1")
#
# self.transp_conv2 = nn.ConvTranspose2d(in_channels=n_outputs,
# out_channels=n_outputs,
# kernel_size=4,
# stride=2,
# padding=0,
# output_padding=0,
# bias=True)
#
# self.transp_conv2 = hk.Conv2DTranspose(
# output_channels=n_outputs,
# kernel_shape=4,
# stride=2,
# with_bias=True,
# padding="SAME",
# name="transp_conv_2")
#
# def forward(self, inputs: torch.Tensor, *,
# test_local_stats: bool = False) -> torch.Tensor: # TODO what is test_local_stats?
# out = inputs
# out = self.transp_conv1(out)
# out = F.relu(out)
# out = self.transp_conv2(out)
#
# return out
#
#
# class Conv3DUpsample(nn.Module):
# """Simple convolutional auto-encoder."""
#
# def __init__(self,
# n_outputs: int,
# n_time_upsamples: int = 2,
# n_space_upsamples: int = 4):
#
# super().__init__()
#
# self._n_outputs = n_outputs
# self._n_time_upsamples = n_time_upsamples
# self._n_space_upsamples = n_space_upsamples
#
# def forward(self, x: torch.Tensor) -> torch.Tensor:
# n_upsamples = max(self._n_time_upsamples, self._n_space_upsamples)
#
# time_stride = 2
# space_stride = 2
#
# for i in range(n_upsamples):
# if i >= self._n_time_upsamples:
# time_stride = 1
# if i >= self._n_space_upsamples:
# space_stride = 1
#
# channels = self._n_outputs * pow(2, n_upsamples - 1 - i)
#
# x = hk.Conv3DTranspose(output_channels=channels,
# stride=[time_stride, space_stride, space_stride],
# kernel_shape=[4, 4, 4],
# name=f"conv3d_transpose_{i}")(x)
# if i != n_upsamples - 1:
# x = F.relu(x)
#
# return x
``` |
{
"source": "jobrachem/alfred3-interact",
"score": 2
} |
#### File: tests/res/script-match_stepwise.py
```python
import alfred3 as al
import alfred3_interact as ali
exp = al.Experiment()
@exp.setup
def setup(exp):
mm = ali.MatchMaker("a", "b", exp=exp, admin_pw="test")
group = mm.match_stepwise()
exp.plugins.group = group
@exp.member
class Success(al.Page):
def on_first_show(self):
group = self.exp.plugins.group
role = group.me.role
self += al.Text(f"Matched to group: \t{group.group_id[-4:]}")
self += al.Text(f"Matched to role: \t{role}")
```
#### File: alfred3-interact/tests/test_spec.py
```python
import pytest
from alfred3_interact import ParallelSpec, SequentialSpec, IndividualSpec
class TestSequentialSpec:
def test_init(self):
spec = SequentialSpec("a", "b", nslots=5, name="test")
assert spec
class TestParallelSpec:
def test_init(self):
spec = ParallelSpec("a", "b", nslots=5, name="test")
assert spec
class TestIndividualSpec:
def test_init(self):
spec = IndividualSpec(nslots=5, name="test")
assert spec
``` |
{
"source": "jobrachem/alfred3_scheduler",
"score": 3
} |
#### File: alfred3_scheduler/tests/create.py
```python
from pathlib import Path
from datetime import datetime
from subprocess import run
import click
@click.command()
@click.option(
"-name", prompt="Enter a name for the test experiment", help="Name for test experiment"
)
def testexp(name):
"""Create a test experiment."""
timestamp = datetime.today().strftime("%Y%m%d%H%M")
dirname = timestamp + "-" + name
path = Path.cwd() / "exp" / dirname
path = path.resolve()
path.mkdir(parents=True)
run(
[
"alfred3",
"template",
f"--path={str(path)}",
],
check=True,
)
run(["code", path / "script.py"], check=True)
if __name__ == "__main__":
testexp() # pylint: disable=no-value-for-parameter
``` |
{
"source": "JoBrad/petl_fwf",
"score": 2
} |
#### File: petl_fwf/test/test_fwf_reader.py
```python
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import gzip
import bz2
import os
import io
from petl.test.helpers import ieq, eq_
from petl_fwf.fwf_reader import fromfwf
def test_fromfwf_no_header():
f = NamedTemporaryFile(delete=False, mode='wb')
f.write(b'Header text that should not be returned in results\n')
f.write(b' a 1\n')
f.write(b'b 2 \n')
f.write(b'c 3 \n')
f.write(b'Footer text that should not be returned in results\n')
f.close()
actual = fromfwf(f.name, encoding='ascii', widths=[3, 3], header=('col1', 'col2'),
skiprows=1, skipfooter=1)
expect = (('col1', 'col2'),
(' a', ' 1'),
('b ', '2 '),
('c ', '3 '))
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromfwf_with_header():
f = NamedTemporaryFile(delete=False, mode='wb')
f.write(b'c1 c2\n')
f.write(b' a 1\n')
f.write(b'b 2 \n')
f.write(b'c 3 \n')
f.write(b'Footer text that should not be returned in results\n')
f.close()
actual = fromfwf(f.name, encoding='ascii', widths=[3, 3], skipfooter=1)
expect = (('c1 ', ' c2'),
(' a', ' 1'),
('b ', '2 '),
('c ', '3 '))
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_fromfwf_gz_no_header():
# initial data
f = NamedTemporaryFile(delete=False)
f.close()
fn = f.name + '.gz'
os.rename(f.name, fn)
f = gzip.open(fn, 'wb')
try:
f.write(b'Header text that should not be returned in results\n')
f.write(b' a 1\n')
f.write(b'b 2 \n')
f.write(b'c 3 \n')
f.write(b'Footer text that should not be returned in results\n')
finally:
f.close()
expect = (('col1', 'col2'),
(' a', ' 1'),
('b ', '2 '),
('c ', '3 '))
actual = fromfwf(f.name, encoding='ascii', widths=[3, 3], header=('col1', 'col2'),
skiprows=1, skipfooter=1)
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
```
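
A minimal usage sketch distilled from the tests above; the file contents are made up and it assumes skiprows/skipfooter default to 0:

```python
from tempfile import NamedTemporaryFile
import petl as etl
from petl_fwf.fwf_reader import fromfwf

f = NamedTemporaryFile(delete=False, mode='wb')
f.write(b' a  1 \n')
f.write(b'b  2  \n')
f.close()

# each row is split into two fixed-width fields of 3 characters
table = fromfwf(f.name, encoding='ascii', widths=[3, 3], header=('col1', 'col2'))
print(etl.look(table))
```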
#### File: JoBrad/petl_fwf/setup.py
```python
from __future__ import (print_function, unicode_literals)
from builtins import str
from ast import literal_eval
from setuptools import setup
def get_version(source='petl_fwf/__pkginfo__.py'):
with open(source) as f:
for line in f:
if line.startswith('__version__'):
return literal_eval(line.split('=')[-1].lstrip())
raise ValueError("__version__ not found")
setup(
name='petl_fwf',
version=get_version(),
author='JoBrad',
author_email='<EMAIL>',
package_dir={'': '.'},
packages=['petl_fwf', 'petl_fwf.test'],
install_requires=['petl'],
tests_require=['nose'],
url='https://github.com/JoBrad/petl_fwf',
license='MIT License',
description='Additional methods for the petl library that enable reading fixed-width files',
long_description=open('README.md').read(),
python_requires='>=2.7',
keywords='petl fixed-width text',
classifiers=['Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
``` |
{
"source": "JoBrad/pyColorClass",
"score": 3
} |
#### File: pyColorClass/colorClass/helpers.py
```python
import collections, copy, inspect, re, string, sys
from definedColors import __definedColors__
# Default color values to use when the provided color value cannot be used
DEFAULT_COLOR = '#000000'
DEFAULT_HEX_VALUE = '00'
DEFAULT_INT_VALUE = 0
# A list of values, in order, that we'll look for when parsing iterables
RGB_PARSER = ['r', 'g', 'b']
RGB_NAMES = ['red', 'green', 'blue']
# Non-whitespace characters that are turned into whitespace before splitting a potential RGB color string
SEPARATORS = ','
# RegEx pattern for a valid Hex character
VALID_HEX_CHARACTER = r'[0-9a-fA-F]'
# Match hex strings of various lengths
VALID_HEX_STRING_TEMPLATE = r'^\s*#?(%s{CHARACTERCOUNT})\s*$' % VALID_HEX_CHARACTER
HEX_PATTERNS = [
re.compile(VALID_HEX_STRING_TEMPLATE.replace('CHARACTERCOUNT', '2')),
re.compile(VALID_HEX_STRING_TEMPLATE.replace('CHARACTERCOUNT', '3')),
re.compile(VALID_HEX_STRING_TEMPLATE.replace('CHARACTERCOUNT', '6'))
]
# Python 2 vs 3 abstract collection class
if tuple(sys.version_info)[0] == 3:
collections = collections.abc
def __isIntType__(obj):
"""
Returns true if the obj is an integer
"""
return isinstance(obj, int)
def __isColorInt__(inputValue):
"""
Returns true if the provided value is an integer between 0 and 255.
"""
if __isIntType__(inputValue):
return 0 <= inputValue <= 255
else:
return False
def __isFloatType__(obj):
"""
Returns true if the obj is a float
"""
return isinstance(obj, float)
def __isColorPercent__(inputValue):
"""
Returns true if the provided value is a float between 0.0 and 1.0.
"""
if __isFloatType__(inputValue):
return 0.0 <= inputValue <= 1.0
else:
return False
def __isNumericType__(obj):
"""
Returns true if the obj is an int or float
"""
return True in [__isIntType__(obj), __isFloatType__(obj)]
def __isHexString__(inputValue):
"""
Returns True if the inputValue is a hex string.
"""
if __isStringType__(inputValue):
return len([res for res in [patt.match(inputValue) for patt in HEX_PATTERNS] if res is not None]) > 0
else:
return False
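# Illustrative examples (a sketch based on HEX_PATTERNS above): __isHexString__('FF'),
# __isHexString__('#ff8000') and __isHexString__('ABC') are True (2-, 6- and 3-character forms,
# with an optional leading '#'), while __isHexString__('GG') and __isHexString__(255) are False.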
def __isStringType__(obj):
"""
Returns true if the obj is a str type
"""
try:
return hasattr(obj, 'capitalize')
except:
return False
def __isFunctionType__(obj):
"""
Returns true if the provided object is a function
"""
return inspect.isfunction(obj)
def __isIterableType__(obj):
"""
Returns true if the obj is an Iterable type
"""
return isinstance(obj, collections.Iterable)
def __isNonStringIterableType__(obj):
"""
Returns True if the provided value is an iterable, but is not a string
"""
if __isIterableType__(obj):
return __isStringType__(obj) is False
else:
return False
def __isListType__(obj):
"""
Returns True if the provided value is a list
"""
if __isIterableType__(obj):
return isinstance(obj, list)
else:
return False
def __isMappingType__(obj):
"""
Returns true if the obj is a Mapping type
"""
return isinstance(obj, collections.Mapping)
def __isTupleType__(obj):
"""
Returns True if the provided value is a tuple
"""
if __isIterableType__(obj):
return isinstance(obj, tuple)
else:
return False
def __isRGB__(obj):
"""
Returns true if the provided object is a 3-item tuple containing
integer values, or the value None.
"""
if __isTupleType__(obj):
return len([True for item in obj if __isColorInt__(item) or item is None]) == 3
else:
return False
def __toInt__(obj, base = 10):
"""
If the provided object is an integer, it is returned.
If it is not, then it will try to return the value as an integer.
If a base is provided, and the provided object is not a number,
then it will be used to change the base of the provided number.
If an error occurs, None is returned.
"""
try:
if __isNumericType__(obj) is True:
return int(obj)
elif base == 10:
return int(__toFloat__(obj))
else:
return int(obj, base)
except:
return None
def __getColorInt__(inputValue):
"""
Returns an integer between 0 and 255 from the provided numeric value.
If the value cannot be converted to an integer, or is not between 0
and 255 then None is returned.
Conversion order:
* If the value is an integer, it is returned
* If the value is a float between 0 and 1, it is multiplied by 255
and then reprocessed
* If the value is a float between 0 and 255, it is converted to an
integer and reprocessed
* If the value is a string, it is converted to a float,
and reprocessed
"""
if __isColorInt__(inputValue):
return inputValue
else:
if __isColorPercent__(__toFloat__(inputValue)):
returnValue = __toInt__(__toFloat__(inputValue) * 255)
elif __isColorInt__(__toInt__(inputValue)):
returnValue = __toInt__(inputValue)
else:
returnValue = None
return __validate__(returnValue, __isColorInt__, None)
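# Illustrative examples (sketch): __getColorInt__(0.5) -> 127 (0.5 * 255, truncated),
# __getColorInt__('128') -> 128, __getColorInt__(300) -> None (outside the 0-255 range).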
def __intToHex__(inputValue):
"""
Returns a hexadecimal string representation of the provided value.
    If the provided value is already a valid hex string, it is normalized and returned.
Returns DEFAULT_HEX_VALUE if the value cannot be converted.
"""
if __isHexString__(inputValue):
return __getHexString__(inputValue)
else:
try:
returnValue = '%02x'.upper() % __getColorInt__(inputValue)
return __validate__(returnValue, __isHexString__, DEFAULT_HEX_VALUE)
except:
return DEFAULT_HEX_VALUE
def __toFloat__(obj):
"""
If the provided object is a float, it is returned.
If it is not, then it will try to return the value as a float.
If this fails, None is returned.
"""
try:
if __isFloatType__(obj):
return obj
else:
return float(obj)
except:
return None
def __getColorPercent__(inputValue):
"""
Returns a float between 0 and 1 from the provided value. If the
value cannot be converted to a float, or is not between 0.0 and 1.0
then None is returned.
Conversion order:
* If the value is a float, it is returned
* If the value is an integer, it is returned
* If the value is a string with all digits, it is
converted using __toFloat__
* If it is a string that hasn't been caught by the previous tests,
the function tries to convert the value by shifting its base.
Returns a float between 0 and 1 from an integer value
that is between 0 and 255.
    If the provided value is a float between 0 and 1,
it is returned as-is. Otherwise None is returned.
Values that exceed 255, or values that cannot be converted
are returned as None.
"""
# Is this already a valid float?
if __isColorPercent__(inputValue):
return inputValue
else:
if __isColorInt__(inputValue) or __isColorInt__(__toInt__(inputValue)):
returnValue = __toFloat__(inputValue / 255.0)
elif __isColorPercent__(__toFloat__(inputValue)):
returnValue = __toFloat__(inputValue)
else:
returnValue = None
return __validate__(returnValue, __isColorPercent__, None)
def __getHexString__(inputValue):
"""
Returns a hexadecimal string from the provided value, or None, if
the value cannot be converted to hexadecimal.
Hex strings will be upper case, trimmed, and will not have a leading
hash symbol.
If the hex value is 3 characters, it is converted to a 6-character
value. Other lengths are left as-is.
Numeric values are converted to an integer, and then reprocessed.
If the value is not a valid hex string, None is returned.
Examples:
'FF' => 'FF'
255 => 'FF'
128 => '80'
1.0 => 'FF'
"""
if __isStringType__(inputValue):
matchResults = __getElement__([res.groups()[0] for res in [patt.match(inputValue) for patt in HEX_PATTERNS] if res is not None])
if matchResults is not None:
hexString = matchResults.upper()
# Expand 3-character hex strings -> 6
if len(hexString) == 3:
hexString = ''.join([hexString[0] * 2, hexString[1] * 2, hexString[2] * 2])
return hexString
elif __isColorInt__(inputValue):
return __intToHex__(inputValue)
elif __isColorPercent__(inputValue):
return __getHexString__(__getColorInt__(inputValue))
return None
def __hexToInt__(inputValue):
"""
Returns an integer value from the hex input.
Returns None if the value cannot be converted.
"""
if __isColorInt__(inputValue):
return inputValue
elif __isHexString__(inputValue):
return __toInt__(__getHexString__(inputValue), 16)
else:
return None
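# Illustrative examples (sketch): __hexToInt__('FF') -> 255, __hexToInt__('0a') -> 10,
# __hexToInt__(128) -> 128 (already a color int), __hexToInt__('ZZ') -> None.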
def __cleanString__(inputString):
"""
If the provided value is a string, it is trimmed and lower-cased.
Otherwise the provided value is returned, as-is
"""
try:
if __isStringType__(inputString):
return inputString.strip().lower()
else:
return inputString
except:
return inputString
def __rgbToHex__(values):
"""
Returns a formatted hex string created from the provided RGB values
"""
returnValue = DEFAULT_COLOR
if __isHexString__(values):
returnValue = values
elif __isRGB__(values):
returnValue = ''.join([__intToHex__(item) or '00' for item in __rgbFromValue__(values)])
return __formatHexString__(returnValue)
def __formatHexString__(inputValue):
"""
Returns a formatted hex string from the provided string
"""
hexString = __getHexString__(inputValue)
if hexString is not None:
if len(hexString) > 2:
return '#%s' % hexString
else:
return hexString
else:
return None
def __splitString__(inputString):
"""
Replaces any instances of SEPARATORS with a space, and then splits the
provided string.
If the value cannot be split, None is returned.
"""
try:
splitString = inputString.strip().expandtabs(2)
return ''.join([splitString.replace(sepString, ' ') for sepString in SEPARATORS]).split()
except:
return None
def __validate__(inputValue, validationFunction, fallbackValue = None):
"""
Executes the validation function on the provided value. If it passes, then
the value is returned. If the function fails, the fallback value is returned.
"""
assert __isFunctionType__(validationFunction), 'The provided function was not a function!'
try:
if validationFunction(inputValue) == True:
return inputValue
else:
return fallbackValue
except:
return fallbackValue
def __copyList__(fromList, initialValues = None):
"""
Returns a copy of the provided list. Initial values must either be a single value, or
a list of exactly the same size as the provided list.
"""
if __isListType__(fromList) is False:
raise ValueError('The provided value to copy was not a list!')
fromList = copy.deepcopy(fromList)
if initialValues is not None:
initialValues = copy.deepcopy(initialValues)
if initialValues is None or __isNonStringIterableType__(initialValues) is False:
copySingleValue = True
elif __isNonStringIterableType__(initialValues) and len(initialValues) == 1 or __isListType__(initialValues) is False:
# Treat an initialValue object with 1 element the same as a non-iterable, so we could set every value to a list, or to a non-list value
copySingleValue = True
else:
if len(initialValues) != len(fromList):
raise ValueError('The initial values list must be the same size as the list to copy!')
else:
copySingleValue = False
returnList = fromList[:]
for itemIndex in range(len(returnList)):
if copySingleValue is True:
returnList[itemIndex] = initialValues
else:
returnList[itemIndex] = initialValues[itemIndex]
return returnList
def __flatten__(obj):
"""
Always returns a tuple.
If the provided object is None, a non-iterable, or a string, the tuple will
have 1 item, which is the provided object.
If the provided object is a dictionary, then each returned tuple is the result
of the .items() method of that dictionary.
If the provided object is another iterable type, then the iterable is recursively
using the rules above.
Examples:
__flatten__(1) => (1,)
__flatten__('a') => ('a',)
__flatten__([[[1, 2, 3]]]) => (1, 2, 3)
__flatten__(tuple([[[[1, 2, 3]]], [1, 2, 3]])) => (1, 2, 3, 1, 2, 3)
__flatten__({'a': 1, 'b': 2}) => (('a', 1), ('b', 2))
    __flatten__([[[[1, 2, 3]]], [[{'a': 1, 'b': 2}], [1, 2, 3]], 1, ['s'], set([1, 2, 3, 4]), 's']) => (1, 2, 3, ('a', 1), ('b', 2), 1, 2, 3, 1, 's', 1, 2, 3, 4, 's')
"""
returnObj = []
# Avoid parsing a tuple that looks just like what this function would return
if isinstance(obj, tuple) and len(obj) == 1 and __isNonStringIterableType__(obj[0]) is False:
return obj
elif __isNonStringIterableType__(obj) is False:
returnObj.append(obj)
elif __isMappingType__(obj):
returnObj = [item for item in obj.items()]
elif __isNonStringIterableType__(obj):
for item in obj:
if __isNonStringIterableType__(item):
[returnObj.append(flatObj) for flatObj in __flatten__(item)]
else:
returnObj.append(item)
elif obj is not None:
returnObj = [obj]
return tuple(returnObj)
def __getElement__(inputList, index = 0, fallbackValue = None):
"""
Returns the element at the specified index, in the provided list.
If this fails for any reason, then the fallback value is returned.
"""
try:
return inputList[index]
except:
return fallbackValue
def __getValues__(values = None):
"""
Returns the provided values, as a tuple. Note that a tuple is *always*
returned, even if nothing, or just a single value, is passed to the
function. Values should be in the same order they were provided, but I
haven't performed any regression testing to verify this. If the provided
values are non-string iterables, they are flattened in-place using
the __flatten__ function.
Example:
[[[a], [b, c]], {'d': 1}] => (a, b, c, (d, 1),)
"""
if values is None:
return tuple()
elif __isNonStringIterableType__(values):
flattendValues = __flatten__(values)
return flattendValues
else:
return (values,)
def __getColorTupleFromElement__(tupleValue):
"""
Expects a 2-value tuple, usually the result of the enumerate function,
where the first value is the index of the iterable that the item was in,
and the second value is either a 2-element tuple, or a single value.
If the second value of the provided tuple is a 2-value tuple, then it is
checked to see if its first value starts with an RGB color name(using
the RGB_PARSER list), and if the second value is a number. If both of
these pass, then the resulting tuple will be the color and the second
will be the integer value of the provided number.
If the second value of the provided tuple is a single value, then the
first value is used to get the index of the RGB color from RGB_PARSER,
which is returned as the color name, and the integer value of the
provided number is used for the second value.
If the value cannot be associated with a color, then the result will be
an empty tuple.
If the value can be associated with a color, but the number cannot be
processed, then the value for that color will be None.
Since the RGB_PARSER value is used, additional values may be supported
later on.
These examples may help:
RGB_PARSER = ['r', 'g', 'b']
(0, ('red', 128)) => ('r', 128) # 'red' starts with 'r'
(0, 'red', 128) => ('r', 128) # 'red' starts with 'r'
(0, ('Gr', 128)) => ('g', 128)
(0, ('BLUE', 128)) => ('b', 128)
(0, ('ZULU', 128)) => (None, None) # There is no color that starts with z
(0, (128,)) => ('r', 128) # Index 0 is 'r' in RGB_PARSER
(0, 128) => ('r', 128) # Index 0 is 'r' in RGB_PARSER
(1, 128) => ('g', 128)
(2, 128) => ('b', 128)
(2, 455) => ('b', None) # 455 is not a valid int value
(2, 'FF') => ('b', 255) # 'FF' converted to int value
(2, 455) => ('b', None) # 455 is not a valid int value
(3, 128) => (None, None) # There is no index 3 in RGB_PARSER
"""
colorName = None
if __isTupleType__(tupleValue):
parsedValue = __getValues__(tupleValue)
if len(parsedValue) == 2:
colorName = __getElement__(RGB_PARSER, __toInt__(parsedValue[0]))
colorValue = __getColorInt__(parsedValue[1])
elif len(parsedValue) == 3:
colorIndex = __getElement__([rgbIndex for rgbIndex in range(len(RGB_PARSER)) if str(parsedValue[1]).strip().lower().startswith(RGB_PARSER[rgbIndex].strip().lower())])
return __getColorTupleFromElement__(tuple([colorIndex, parsedValue[2]]))
if colorName is None:
return tuple([None, None])
else:
return tuple([colorName, colorValue])
def __parseIterableValue__(iterableValue):
"""
Tries to extract a hex value from the provided iterable.
Returns None if unsuccessful.
"""
# Copy the parser. We'll return this structure, regardless of the outcome
rgb = RGB_PARSER[:]
if __isRGB__(iterableValue):
return __rgbToHex__(iterableValue)
elif __isNonStringIterableType__(iterableValue):
# Handle a dictionary with rgb key/value pairs OR a list of values, in RGB order.
parseValues = dict((colorTuple[0], colorTuple[1]) for colorTuple in (__getColorTupleFromElement__(item) for item in enumerate(iterableValue)) if colorTuple[0] is not None)
# Update the appropriate color values
rgb = [parseValues.get(rgb[rgbIndex], None) for rgbIndex in range(len(RGB_PARSER))]
return tuple(rgb)
else:
return tuple([None for i in rgb])
def __parseStringValue__(stringValue):
"""
Tries to extract a hex value from the provided string.
Returns None if unsuccessful.
"""
if __isHexString__(stringValue):
return __getHexString__(stringValue)
elif __isStringType__(stringValue):
stringValue = stringValue.lower().strip()
if stringValue in COLORS:
return COLORS[stringValue]
else:
return __hexFromValue__(__splitString__(stringValue))
else:
return None
def __hexStringToRGB__(hexString):
"""
Returns a tuple of RGB values from the provided hexString.
"""
rgb = [DEFAULT_INT_VALUE for item in RGB_PARSER]
if __isHexString__(hexString):
providedString = __getHexString__(hexString)
for colorIndex in range(0, len(providedString), 2):
            rgb[colorIndex // 2] = __hexToInt__(providedString[colorIndex: colorIndex + 2])  # integer division keeps the index an int under Python 3
return tuple(rgb)
def __hexFromValue__(*inputValue):
"""
Returns a hex value from the provided value, with invalid values replaced
with default values.
"""
return __rgbToHex__(__rgbFromValue__(inputValue))
def __rgbFromValue__(*inputValue):
"""
The main parsing function. Attempts to return an RGB tuple
from the provided values.
"""
parseValue = __getValues__(inputValue)
if __isRGB__(parseValue):
return parseValue
# Parse as a hex string, or as the red color of an RGB pair
elif len(parseValue) == 1:
parseValue = parseValue[0]
returnValue = __parseStringValue__(parseValue)
if returnValue is not None:
return __hexStringToRGB__(returnValue)
else:
# If it's a number, we'll assume it's the red color of an RGB set
return __rgbFromValue__(__getColorInt__(parseValue), None, None)
elif len(parseValue) > 1:
return __parseIterableValue__(parseValue)
else:
return tuple([None, None, None])
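# Illustrative examples (a sketch, assuming the integer-division fix in __hexStringToRGB__):
#   __rgbFromValue__('#FF8000') -> (255, 128, 0); __rgbFromValue__(255, 128, 0) is returned as-is;
#   a single number such as __rgbFromValue__(200) fills only the red channel: (200, None, None).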
class __const__(object):
"""
A subclass of object that does not allow existing properties to be updated. New values can be added.
Properties can be referenced like normal object properties, or like a dictionary.
New values can only be valid colors, and will be converted to hex strings.
"""
__slots__ = ['__colorValues__', '__colorNames__']
    def __init__(self):
        # Use object.__setattr__ so the overridden __setattr__/__setitem__ (which protect
        # existing colors) is bypassed while the backing dicts are created.
        object.__setattr__(self, '__colorValues__', dict(__definedColors__))
        object.__setattr__(self, '__colorNames__',
                           dict((__cleanString__(key), key) for key in self.__colorValues__.keys()))
def __contains__(self, lookupKey):
"""
Returns true if lookupKey is in this object. Case does not matter.
"""
return self.has_key(lookupKey)
def __get__(self, lookupKey, defaultValue = None):
"""
Returns the value of a property. If it does not exist, the default value is returned.
"""
try:
return self.__getitem__(lookupKey)
except AttributeError as err:
return defaultValue
def __getattr__(self, lookupKey, defaultValue = None):
"""
Returns the value of a property. If it does not exist, the default value is returned.
"""
try:
return self.__getitem__(lookupKey)
except AttributeError as err:
return defaultValue
def __getitem__(self, lookupKey):
"""
Returns the value corresponding to the lookupKey.
"""
lookupKey = self.get_key(lookupKey)
if lookupKey is not None:
return self.__colorValues__[lookupKey]
else:
raise AttributeError("No such property: %s" % lookupKey)
def __set__(self, lookupKey, *newValue):
"""
Adds a property with a value, but will not update an existing value.
"""
self.__setitem__(lookupKey, newValue)
def __setattr__(self, lookupKey, *newValue):
"""
Adds a property with a value, but will not update an existing value.
"""
self.__setitem__(lookupKey, newValue)
def __setitem__(self, lookupKey, *newValue):
"""
Adds a property with a value, but will not update an existing value.
"""
if __isStringType__(lookupKey):
if lookupKey not in self.__colorNames__:
cleanKey = __cleanString__(lookupKey)
self.__colorValues__[lookupKey] = __hexFromValue__(newValue)
self.__colorNames__[cleanKey] = lookupKey.strip()
else:
raise KeyError('Cannot overwrite an existing key value!')
else:
raise TypeError('The property key must be a string!')
def __dir__(self):
"""Returns the list of properties for the object"""
return dir(self.__class__) + [str(k) for k in self.__colorValues__.keys()]
def has_key(self, lookupKey):
"""
Returns true if lookupKey is in this object. Case does not matter.
"""
return self.get_key(lookupKey) is not None
def get_key(self, lookupKey):
"""
Performs a caseless search on the object's keys to find lookupKey. If it
exists, the first matched key (with original casing) is returned.
"""
lookupValue = __cleanString__(lookupKey)
if lookupValue in self.__colorNames__:
return self.__colorNames__[lookupValue]
else:
return None
COLORS = __const__()
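# Hedged usage sketch (assumes 'black' is among the keys of __definedColors__):
#   COLORS['black'] and COLORS.BLACK both resolve case-insensitively via get_key(), and
#   assigning to an existing key raises KeyError, so the defined colors behave as constants.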
``` |
{
"source": "JoBrad/pyUtils",
"score": 2
} |
#### File: JoBrad/pyUtils/rename_files_using_date.py
```python
from functools import partial
import re
from pathlib import Path, PurePosixPath, PureWindowsPath
import stat
from typing import (Dict, Set, List, Tuple, Sequence, Union, Pattern, Match, overload, Iterator)
from datetime import datetime as dt
from utils import futils
# from utils import dtutils
# MONTHS = dtutils.MONTHS
"""
Really raw WIP - this is a side thing that I've been doing on-demand, so it
has a lot of unused code, and basically is a mess.
Renames files in the provided directory using a date value in the file name,
or based on the attributes of the file.
Has only been tested in Windows, but the ultimate goal is for it to work across OS types.
"""
PATH_TYPE = Union[PurePosixPath, PureWindowsPath]
PATH_SEQ_TYPE = Sequence[PATH_TYPE]
PATTERN_TYPE = Pattern
MATCH_SEQ_TYPE = Sequence[Match]
STR_BOOL_TUPLE = Tuple[str, bool]
MATCH_STRING_TUPLE = Tuple[Match, str]
RE_IM = re.IGNORECASE + re.MULTILINE
INPUT_PATHS = [
Path('INPUT PATH')
]
EXCLUDE_LIST = ['~', '.cache', '.git', '.idea', '.project', '.iml', '.vscode', 'desktop.ini'] # type: List[str]
DELETE_LIST = ['.DS_Store', 'Thumbs.db'] # type: List[str]
# TODO: Combine all of these data templates and patterns into another module
YEAR_TEMPLATE = '(?:20|19)[0-9][0-9]' # type: str
SHORT_YEAR_TEMPLATE = '[0-9][0-9]' # type: str
LONG_OR_SHORT_YEAR_TEMPLATE = '{year_pattern}|{short_year_pattern}'.format(
year_pattern = YEAR_TEMPLATE,
short_year_pattern = SHORT_YEAR_TEMPLATE
) # type: str
MONTH_TEMPLATE = '[1-9]|0[0-9]|1[0-2]' # type: str
DAY_TEMPLATE = '0[0-9]|[1-2][0-9]|3[0-1]|[1-9]' # type: str
DAY_YEAR_MONTH_TEMPLATE = '\\b(?P<day>{day_pattern}) ?(?P<year>{year_pattern}) ?(?P<month>{month_pattern})'.format(
year_pattern = YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE
) # type: str
MONTH_AND_YEAR_TEMPLATE = '((?P<year1>{year_pattern})\\b\\s*(?P<month1>{month_pattern})|(?P<month2>{month_pattern})\\b\\s*(?P<year2>{year_pattern}))'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE
) # type: str
# Match month names to month numbers
MONTH_REPLACEMENT_TEMPLATES = {
'(?:january|jan|01)': '01',
'(?:february|feb|02)': '02',
'(?:march|mar|03)': '03',
'(?:april|apr|04)': '04',
'(?:may|05)': '05',
'(?:june|jun|06)': '06',
'(?:july|jul|07)': '07',
'(?:august|aug|08)': '08',
'(?:september|sept|sep|09)': '09',
'(?:october|oct|10)': '10',
'(?:november|nov|11)': '11',
'(?:december|dec|12)': '12'
} # type: Dict[str, str]
# August 2016 / Aug 2016 / 08 2016
M_YEAR_TEMPLATE = '\\b(?P<month>{month_pattern})\'(?P<year>{year_template})\\b' # type: str
# 2016 08 02
ISO_DATE_TEMPLATE = '\\b(?P<year>{year_pattern}) ?(?P<month>{month_pattern}) ?(?P<day>{day_pattern})\\b'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE,
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE
) # type: str
# 08 02 2016
US_DATE_TEMPLATE = '\\b(?P<month>{month_pattern}) ?(?P<day>{day_pattern}) ?(?P<year>{year_pattern})\\b'.format(
month_pattern = MONTH_TEMPLATE,
day_pattern = DAY_TEMPLATE,
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE
) # type: str
# Patterns = compiled RegEx templates
MONTH_REPLACEMENT_PATTERNS = {
re.compile(pattern='\\b({month_pattern})\\b'.format(month_pattern=k), flags=RE_IM): v
for k, v in MONTH_REPLACEMENT_TEMPLATES.items()
} # type: Dict[PATTERN_TYPE, str]
# Apr'16
M_YEAR_PATTERNS = {
re.compile(
pattern=M_YEAR_TEMPLATE.format(
month_pattern=k,
year_template=LONG_OR_SHORT_YEAR_TEMPLATE
),
flags=RE_IM
): v
for k, v in MONTH_REPLACEMENT_TEMPLATES.items()
} # type: Dict[PATTERN_TYPE, str]
# MM dd yyyy
US_DATE_PATTERN = re.compile(
pattern=US_DATE_TEMPLATE,
flags=RE_IM
) # type: Pattern
# dd yyyy dd
DAY_YEAR_MONTH_PATTERN = re.compile(
pattern=DAY_YEAR_MONTH_TEMPLATE,
flags=RE_IM
) # type: Pattern
# yyyy MM dd
LONG_DATE_PATTERN = re.compile(
pattern=ISO_DATE_TEMPLATE,
flags=RE_IM
) # type: Pattern
# yyyy MM or MM yyyy
MONTH_YEAR_PATTERN = re.compile(
pattern=MONTH_AND_YEAR_TEMPLATE,
flags=RE_IM
) # type: Pattern
YEAR_PATTERN = re.compile(
pattern='(?:\'?\\b(?P<year>{year_pattern}))\\b'.format(
year_pattern = LONG_OR_SHORT_YEAR_TEMPLATE
),
flags=RE_IM
) # type:PATTERN_TYPE
MONTH_PATTERN = re.compile(
pattern='\\b(?P<month>{month_pattern})\\b'.format(
month_pattern = MONTH_TEMPLATE
),
flags=RE_IM
) # type: Pattern
WHITESPACE_PATTERN = re.compile(r'\s', RE_IM) # type: PATTERN_TYPE
SEPARATOR_PATTERN = re.compile(pattern='([ \\.\\,\\_\\-\\+])') # type: PATTERN_TYPE
BRACKET_PATTERN = re.compile(pattern='([\\(\\)\\[\\]\\{\\}])') # type: PATTERN_TYPE
format_year_string = lambda year_string: year_string if len(year_string.strip()) == 4 else '20{0}'.format(year_string.strip())
format_day_or_month_string = lambda day_or_month_string: day_or_month_string.strip().zfill(2)
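# Illustrative examples (sketch): format_year_string('14') -> '2014',
# format_year_string('2014') -> '2014', format_day_or_month_string('6') -> '06'.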
def get_matches(input_string:str, search_pattern:Pattern) -> Iterator[Match]:
"""
Moves from left to right, in input_string, yielding each match from search_pattern
until there are no more matches, when None is returned
"""
start_pos = 0 # type: int
search_result = search_pattern.search(input_string, start_pos) # type: Match
while search_result is not None:
yield search_result # type: Match
start_pos = search_result.span()[1]
search_result = search_pattern.search(input_string, start_pos)
def match_patterns(input_string:str, search_patterns:Union[Dict[Pattern, str], List[Pattern]]) -> Dict[Pattern, Union[str, None]]:
    """
    Returns a Dict of every pattern in search_patterns that matched input_string, mapped to its
    replacement string (or None when search_patterns is a plain list). If none of the patterns
    matched, or if there was an error, an empty Dict is returned.
    """
    return {pattern: None if isinstance(search_patterns, list) else search_patterns[pattern] for pattern in search_patterns if pattern.search(str(input_string)) is not None}
@partial
def execute_on_matches(func:callable, input_string:str, search_patterns:Union[Dict[Pattern, str], List[Pattern]]) -> Tuple[str, bool]:
"""
For each matching pattern in search_patterns, passes input_string and the result to func
Returns Tuple[return_string, made_a_match] where return_string will be the result of func and True,
or input_string with no changes, and False, if no matches were found in search_patterns
"""
return_string = str(input_string) # type:str
made_a_match = False # type: bool
matching_patterns = match_patterns(input_string, search_patterns) # type: List[Pattern]
if len(matching_patterns) > 0:
        for matching_pattern in matching_patterns:  # type: Pattern
made_a_match = True
if isinstance(search_patterns, Dict):
str_value = search_patterns[matching_pattern] # type: Union[None, str]
else:
str_value = None
for match in get_matches(return_string, matching_pattern):
return_string = func(return_string, (matching_pattern, str_value))
return (return_string, made_a_match)
@partial
def execute_on_file_stem(func:callable, full_file_path:Union[str, Path], **kwargs) -> Tuple[Path, bool]:
"""
Calls func(provided_file_stem, **kwargs), which should return Tuple[str, made_a_change],
where str is the provided string, with any changes, and made_a_change is a boolean indicating
whether changes were made.
The returned string is returned as the stem of the provided full_file_path, as a Path object
"""
try:
file_obj, file_parent, filename, file_suffix = get_file_parts(full_file_path)
except AttributeError:
raise
return_string, made_a_change = func(filename, **kwargs) # type: str, bool
new_filename = '{0}{1}'.format(return_string, file_suffix) # type: str
return (Path.joinpath(file_parent, new_filename), made_a_change)
def format_m_year_execute(input_string:str, match_pattern:Tuple[Pattern, str]) -> str:
"""
Core of loop for format_m_year_strings
"""
return_string = str(input_string) # type:str
search_pattern, month_number = match_pattern # type: Pattern, str
search_result = search_pattern.search(return_string) # type: Match
string_to_replace, year = search_result.group(0), format_year_string(search_result.group('year')) # type: str, str
return_string = replace_and_prepend(return_string, string_to_replace, '{0} {1} '.format(year, month_number))
return return_string
def format_m_year_strings(input_string: str) -> Tuple[str, bool]:
"""
Looks for a m'year value in the string. If it finds
one, then it moves it to the front of the string
Returns a tuple (return_string:str, made_a_match:bool)
"""
return execute_on_matches(format_m_year_execute, input_string, M_YEAR_PATTERNS)
def format_month_string_execute(input_string:str, match_pattern:Tuple[Pattern, str]) -> str:
"""
Core of loop for format_month_strings_with_numbers function
"""
return_string = str(input_string) # type:str
search_pattern, month_number = match_pattern # type: Match, str
return search_pattern.sub(month_number, return_string)
def format_month_strings_with_numbers(input_string:str) -> Tuple[str, bool]:
"""
Replaces month names with their padded numeric equivalent
"""
return execute_on_matches(format_month_string_execute, input_string, MONTH_REPLACEMENT_PATTERNS)
def format_day_year_month_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_day_year_month_date_string
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1} {2}'.format(search_result.group('year'), search_result.group('month'), search_result.group('day')) # type: str
return input_string.replace(search_result.group(0), replacement_string)
def format_day_year_month_date_string(input_string:str) -> Tuple[str, bool]:
"""
Replaces dates with the format dd yyyy MM with yyyy MM dd format
"""
return execute_on_matches(format_day_year_month_execute, input_string, [DAY_YEAR_MONTH_PATTERN])
def format_us_date_strings_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_us_date_strings
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1} {2}'.format(
format_year_string(search_result.group('year')),
format_day_or_month_string(search_result.group('month')),
format_day_or_month_string(search_result.group('day'))
) # type: str
return return_string.replace(search_result.group(0), replacement_string)
def format_us_date_strings(input_string:str) -> Tuple[str, bool]:
"""
Re-arranges US-style date formats (MM-dd-yyyy) to yyyy-MM-dd style
Un-padded month and day values are also matched.
Years without a century value will be assumed to be after 2000.
"""
return execute_on_matches(format_us_date_strings_execute, input_string, [US_DATE_PATTERN])
def format_year_month_execute(input_string:str, match_pattern:Tuple[Pattern, None]) -> str:
"""
Core of loop for format_year_month_strings
"""
return_string = str(input_string) # type:str
search_result = match_pattern[0].search(return_string) # type: Match
replacement_string = '{0} {1}'.format(
format_year_string(search_result.group('year1') or search_result.group('year2')),
format_day_or_month_string(search_result.group('month1') or search_result.group('month2'))
) # type: str
return return_string.replace(search_result.group(0), replacement_string)
def format_year_month_strings(input_string:str) -> Tuple[str, bool]:
"""
Formats MM yyyy date strings as yyyy MM
"""
return execute_on_matches(format_year_month_execute, input_string, [MONTH_YEAR_PATTERN])
def remove_double_spaces(input_string:str) -> str:
"""
Replaces double spaces with single spaces, in the provided string
"""
return ' '.join(WHITESPACE_PATTERN.sub(' ', input_string).split())
def clean_up_name(input_string:str) -> Tuple[str, bool]:
"""
Replaces .,_-+%20 with spaces
Replaces unicode spaces with standard spaces
Replaces double spaces with single spaces
Removes trailing and leading spaces
Removes ([{}])
"""
filename = str(input_string).strip()
# Replace separators with spaces
new_filename = re.sub(SEPARATOR_PATTERN, ' ', filename)
# Replace %20 with space
new_filename = new_filename.replace('%20', ' ')
# Replaces double spaces
new_filename = remove_double_spaces(new_filename)
# Remove brackets
new_filename = re.sub(BRACKET_PATTERN, '', new_filename).strip()
return (new_filename, new_filename.endswith(filename))
def fix_date_strings(input_string:str) -> Tuple[str, bool]:
"""
Looks for several date formats in the provided string, and replaces
them with a date with the most complete format that can be found,
from the list below:
yyyy MM dd
yyyy MM
yyyy
Operational order
* Replace mmm'yy or mmm'yyyy with yyyy MM
* Replace dd yyyy MM with yyyy MM dd
* Replace MM dd yyyy with yyyy MM dd
Returns Tuple[return_string, made_a_match]
If no changes were made, the provided string is returned, without any changes.
"""
return_string = str(input_string).strip() # type:str
made_a_match = False # type: bool
date_funcs = (
format_m_year_strings,
format_month_strings_with_numbers,
format_day_year_month_date_string,
format_us_date_strings
)
# Only try these if we weren't able to find matches from date_funcs
additional_date_patterns = [
YEAR_PATTERN,
MONTH_PATTERN
]
for date_func in date_funcs:
return_string, matched = date_func(return_string) # type: str, bool
made_a_match = max(made_a_match, matched)
if made_a_match is True:
return (return_string, made_a_match)
else:
matching_patterns = match_patterns(return_string, additional_date_patterns)
for matching_pattern in matching_patterns:
if matching_pattern == YEAR_PATTERN:
format_func = format_year_string
group_name = 'year'
else:
format_func = format_day_or_month_string
group_name = 0
made_a_match = True
for date_match in get_matches(return_string, matching_pattern): # type: Match
return_string = return_string.replace(date_match.group(0), format_func(date_match.group(group_name)))
break
if made_a_match is False:
return (input_string, made_a_match)
else:
return (return_string, made_a_match)
def replace_and_prepend(input_string:str, search_string: str, replacement_string:str=None, prepend_string:str=None) -> str:
"""
If search_string is in input_string, it is replaced with replacement_string,
the string is then trimmed, prepended with prepend_string, and returned.
If search_string is not in input_string, the original string is returned.
"""
return_string = input_string
if prepend_string is None:
prepend_string = ''
if search_string in input_string:
return remove_double_spaces('{0}{1}'.format(prepend_string, re.sub(search_string, replacement_string, return_string).strip()))
else:
return input_string
def get_best_date_string(input_string:str, start_pos:int=0) -> Match:
"""
Returns the most complete date string found in input_string,
starting at start_pos.
If no match is found, then None is returned.
"""
provided_string = str(input_string) # type: str
date_patterns = [
LONG_DATE_PATTERN,
MONTH_YEAR_PATTERN,
YEAR_PATTERN
]
for date_pattern in match_patterns(provided_string, date_patterns):
for search_result in get_matches(provided_string, date_pattern):
yield search_result
break
def add_file_date(file_name:str, full_file_path:Union[Path, str]) -> str:
"""
Looks for the first, most complete date string in the stem of the provided
file. If that date is missing a year and/or month value, then those
values will be retrieved from either the parent folder name, or the file's
modified timestamp. A day value will not be used unless it is already
in the filename.
Any date string retrieved from the filename will be moved to the
begining of the string, in the format yyyy MM dd or yyyy MM.
"""
file_path_obj = Path(full_file_path)
if file_path_obj.is_file() is False:
raise AttributeError('You must provide the file path to this function!')
input_string = str(file_name)
date_parts = ('year', 'month', 'day')
file_name_date = {k: None for k in date_parts}
string_to_replace = '' # type:str
if YEAR_PATTERN.search(str(file_path_obj.parent)) is not None:
        file_name_date['year'] = YEAR_PATTERN.search(str(file_path_obj.parent)).group('year')
else:
        file_name_date['year'] = str(dt.fromtimestamp(file_path_obj.stat().st_mtime).year)
if MONTH_PATTERN.search(str(file_path_obj.parent)) is not None:
file_name_date['month'] = MONTH_PATTERN.search(str(file_path_obj.parent)).group('month')
else:
        file_name_date['month'] = str(dt.fromtimestamp(file_path_obj.stat().st_mtime).month)
# Get the best date we have
    for date_match in get_best_date_string(input_string):
string_to_replace = date_match.group(0)
found_date_parts = [k.strip().lower() for k in date_match.groupdict().keys() if k.strip().lower() in date_parts]
for date_part in found_date_parts:
file_name_date[date_part] = date_match.groups(date_part)
break
best_date_string = '{0} {1} '.format(format_year_string(file_name_date['year']), format_day_or_month_string(file_name_date['month']))
if file_name_date['day'] is not None:
best_date_string = '{0}{1} '.format(best_date_string, format_day_or_month_string(file_name_date['day']))
    return_string = replace_and_prepend(input_string=input_string, search_string=string_to_replace, prepend_string=best_date_string)
    return return_string
def move_date_to_start_of_string(input_string:str) -> str:
"""
    Finds the best date string, and moves it to the beginning of the string
"""
try:
        best_date_strings = [date_string_match for date_string_match in get_best_date_string(input_string)]
date_start_pos = best_date_strings[0].span()[0]
date_end_pos = best_date_strings[len(best_date_strings) - 1].span()[1]
date_string = input_string[date_start_pos:date_end_pos]
    except Exception:
        # No usable date string was found; leave the filename unchanged
        return input_string
return replace_and_prepend(input_string=input_string, search_string=date_string, prepend_string=date_string)
def get_file_parts(file_obj:Union[str, Path]) -> Tuple[Path, Path, str, str]:
"""
Returns Tuple[file_path_obj, file_parent_obj, file_stem, file_suffix]
"""
source_file = futils.get_clean_path(file_obj)
if source_file.parent == '.' or source_file.is_file() is False:
raise AttributeError('You must provide a complete file path to this function!')
return (source_file, source_file.parent, source_file.stem, source_file.suffix)
def apply_renaming_rules(filename:Union[PATH_TYPE, str], **kwargs:Dict[str, bool]) -> PATH_TYPE:
"""
    Applies some basic renaming rules to the file, and renames it, if necessary
Available options:
* clean_names: Removes junk from the file name
* fix_dates: Re-formats dates in the file name to yyyy MM dd format
* add_file_date Adds the year and/or month to the file date, if it is not present
This is done by using dates from the parent folder name or the
file's modified_timestamp
    * move_date: Moves dates to the beginning of the filename.
TODO: Properly handle date ranges, for move_date
TODO: Properly handle all calls to execute_on_file_stem
"""
try:
source_file, source_file_parent, source_file_stem, source_file_suffix = get_file_parts(filename)
except AttributeError:
raise
if len(kwargs) == 0:
return source_file
func_list = []
options = [o.strip().lower() for o in kwargs.keys()]
# We need to apply these in this order
if 'clean_names' in options:
func_list.append(clean_up_name)
if 'fix_dates' in options:
func_list.append(fix_date_strings)
if 'add_file_date' in options:
func_list.append(add_file_date)
if 'move_date' in options:
func_list.append(move_date_to_start_of_string)
for func in func_list:
execute_on_file_stem(func, source_file_stem)
# Logic:
# * Clean up filename
# * Fix dates in the filename
# * Try renaming:
    # * If the filename contains a date range, then move it to the beginning of the file, and stop
    # * If the filename contains a full date, then move it to the beginning of the file, and stop
    # * If the filename contains year month only, then move it to the beginning of the file, and stop
    # * If the filename only contains a month, then
    #   * Get the year from the parent folder name, or from the file's created timestamp
    #   * Prepend the year, and move the month just after it, and stop
    # * If the filename only contains a year, then move it to the beginning of the file, and stop
new_file_stem = clean_up_name(source_file_stem)
# Try to fix dates
new_file_stem, found_match = fix_date_strings(new_file_stem)
date_parts = ('year', 'month', 'day')
file_name_date = {}
# Get the best date we have
    for date_match in get_best_date_string(new_file_stem):
for date_part in date_parts:
if date_part in date_match.groupdict():
file_name_date[date_part] = date_match.groups(date_part)
if 'year' not in file_name_date:
file_name_date['year'] = backup_file_year
break
# We should have a good date now
file_prefix = ' '.join(file_name_date[d] for d in date_parts)
new_file_stem, found_match = move_year_month_to_string_start(new_file_stem)
# In this case, we should use some other value for the year
if found_match is False:
new_file_stem, found_match = replace_month(new_file_stem)
if found_match:
if YEAR_PATTERN.search(str(filename.parent)) is not None:
file_year = YEAR_PATTERN.search(str(filename.parent)).group(0)
else:
file_year = dt.fromtimestamp(filename.stat().st_mtime).year
new_file_stem = '{0} {1}'.format(file_year, new_file_stem)
if found_match is True and new_file_stem != source_file_stem:
        destination_file = futils.get_unique_filename(source_file.with_name('{0}{1}'.format(new_file_stem, source_file_suffix)))
destination_file = futils.safe_move(source_file, destination_file.name)
else:
destination_file = source_file
return destination_file
def get_files(directory:Union[PurePosixPath, PureWindowsPath]) -> Sequence[str]:
"""
Returns a list of the full path for each file in the given directory
"""
# return_file_list = [Path(f) for f in directory.glob('**/*') if f.is_file() and not bool(f.stat().st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN) and len(f.suffix) > 0]
return_file_list = [Path(f) for f in directory.glob('**/*') if f.is_file()]
for exclude_pattern in EXCLUDE_LIST:
return_file_list = [f for f in return_file_list if exclude_pattern not in str(f)]
return return_file_list
def process_files(input_directory:PATH_TYPE) -> PATH_SEQ_TYPE:
"""
Processes files in the provided directory
"""
processed_files = []
for this_file in get_files(input_directory):
if this_file.name in DELETE_LIST:
this_file.unlink()
processed_files.append((this_file, 'Deleted'))
else:
processed_files.append((this_file, apply_renaming_rules(this_file)))
return processed_files
# for input_directory in INPUT_PATHS:
# processed_files = process_files(input_directory)
# for original_file, new_file in processed_files:
# if str(original_file) != str(new_file):
# print('Renamed {0}\nto\t{1}\n'.format(str(original_file), str(new_file)))
test_strings = [
'CLIENT Weekly Performance Report 6 9 14 to 6 15 14',
'2014 03 27 CLIENT Monthly Reporting Sample',
'Rev Share \'14'
]
for test_string in test_strings:
a, matched = fix_date_strings(test_string)
    for date_match in get_best_date_string(a):
c = 1
```
#### File: pyUtils/utils/fix_registry_paths.py
```python
import const
import futils as fu
import io
from pathlib import Path
import sys
from typing import (List, Union)
"""
Fixes corrupted Registry Paths, related to a bug in Windows Insider editions which replaces
the drive in non-system Drive Program Files/Program Files (x86) paths to the system drive.
This script will look for these paths, and update them.
NOTE: This hasn't really been tested, except on one laptop.
"""
INPUT_FILE = Path('~/Desktop/hkcr.reg').expanduser()
OUTPUT_FILE = INPUT_FILE.with_name('{0}_replaced{1}'.format(INPUT_FILE.stem, INPUT_FILE.suffix))
d_program_file_paths = ['{0}\\\\'.format(str(p).replace('\\', '\\\\')) for p in fu.get_unique_paths('D:/Program Files', 'C:/Program Files')] + [str(p).replace('\\', '\\\\') for p in fu.get_unique_paths('D:/Program Files (x86)', 'C:/Program Files (x86)')]
incorrect_program_paths = ['C{0}'.format(p[1:]) for p in d_program_file_paths]
path_replacements = [p for p in zip(incorrect_program_paths, d_program_file_paths)]
# get_path_part_count = lambda p: len(fu.get_path_parts(p))
made_changes = False
class RegFile_Section():
__header_string__ = 'Windows Registry Editor'
def __init__(self, section_text:List[str]):
"""
Returns a RegFile_Section object for the section_text
"""
self.name = None # type: str
self.type = None # type: str
self.content = [] # type: List[str]
self.text = [] # type: List[str]
content = [str(t) for t in section_text]
if content[0].startswith(self.__header_string__):
self.type = 'HEADER'
self.content = content
else:
self.type = 'SECTION'
self.name = content[0].strip().lstrip('[').rstrip(']')
self.content = content[1:]
self.text = ''.join(content)
def __repr__(self):
"""
Returns the raw text of the object
"""
return self.text
def __contains__(self, text):
"""
Returns true if this object contains text
"""
return text in repr(self)
class RegFile():
def __init__(self, filename):
"""
Returns an iterator for the filename
"""
self.header = None
self.filename = str(Path(filename))
def __repr__(self):
"""
Returns a description of the object
"""
return 'RegFile <{0}>'.format(self.filename)
def __get_next_section__(self, file_obj):
"""
Returns the next file section, as a RegFile_Section object,
or None, if at the end of the file.
"""
return_strings = [] # type: List[str]
found_header_or_eof = False # type: bool
while found_header_or_eof is False:
current_position = file_obj.tell()
this_line = file_obj.readline() # type:Union[str, None]
if len(this_line) == 0:
found_header_or_eof = True
elif this_line.startswith('[') and this_line.strip().endswith(']') and len(return_strings) > 0:
file_obj.seek(current_position, io.SEEK_SET)
found_header_or_eof = True
else:
return_strings.append(this_line)
if len(return_strings) > 0:
return RegFile_Section(return_strings)
else:
return None
def __iter__(self):
yield from self.sections()
def sections(self):
"""
Returns an iterator of the file's sections
"""
with open(self.filename, 'rt', encoding='utf-16') as in_file:
try:
while True:
yield self.__get_next_section__(in_file)
except EOFError:
pass
in_file = RegFile(INPUT_FILE)
def parse_sections(input_file, output_filename):
made_changes = False
with open(output_filename, 'wt', encoding='utf-16', newline='\n') as out_file:
for section in input_file.sections():
if section is not None:
file_record = section.text
if 'C:\\\\Program Files' in section:
found_record = [i for i, p in enumerate(incorrect_program_paths) if p in section]
if len(found_record) > 0:
made_changes = True
for replacement_index in found_record:
incorrect_path, replacement_path = path_replacements[replacement_index]
file_record = file_record.replace(incorrect_path, replacement_path)
print('Changed {0} to {1}'.format(incorrect_path, replacement_path))
out_file.write(file_record)
elif section.type == 'HEADER':
out_file.write(file_record)
else:
break
return made_changes
made_changes = parse_sections(in_file, OUTPUT_FILE)
if made_changes is False:
Path(OUTPUT_FILE).unlink()
print('No changes made.')
print('All done!')
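# Hedged sketch of the .reg layout this parser assumes (e.g. a UTF-16 `regedit` export):
#   Windows Registry Editor Version 5.00          <- first section, typed as HEADER
#   [HKEY_CLASSES_ROOT\Applications\demo.exe]     <- a new section starts at each [...] line
#   "Path"="C:\\Program Files\\Demo\\"            <- content lines until the next [...] line
# The key name and value above are invented for illustration only.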
``` |
{
"source": "jobrajac/ca-es",
"score": 2
} |
#### File: ca-es/src/ca_es.py
```python
import copy
import logging
import math
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import random
import time
import torch
import torch.multiprocessing as tmp
import torch.nn.functional as F
import torch.tensor as tt
from torchvision.utils import save_image
from dist import Master, Worker
from net import CAModel
from pool import CustomPool
from utils import load_emoji, to_rgb, visualize_batch, append_file, write_file, export_model, dmg
from weight_updates import hebbian_update
HIDDEN_SIZE = None
class EvolutionStrategy:
"""Master class for performing an evolution.
Keeps track of hyperparameters, weights/coeffs.
Contains methods for running the environment, evaluate performances and update parameters.
"""
def __init__(self, args):
self.iterations = args.iter
self.learning_rate = args.lr
self.sigma = args.sigma
self.pop_size = args.pop_size
self.fire_rate = args.fire_rate
self.target_size = args.size
self.target_padding = args.pad
self.new_size = self.target_size + 2 * self.target_padding
self.channel_n = args.channels
self.hidden_size = args.hidden_size
HIDDEN_SIZE = self.hidden_size
self.target_img = load_emoji(args.emoji, self.target_size)
self.use_hebb = args.hebb
self.use_pool = args.pool
self.damage = args.damage
self.damageChannels = args.damageChannels
self.use_mp = args.use_mp
self.decay_state = 0
self.log_main_every = 10
self.hit_goal = False
self.cross_machine = args.cross_machine
self.is_master = args.master
self.nodes = args.nodes
if self.damage > 0:
if not self.use_pool and not self.damage <=3:
raise ValueError("use_pool needs to be true and damage_bottom_n < 4.")
if self.cross_machine:
if self.is_master:
self.master = Master(nodes=args.nodes)
else:
self.worker = Worker(run_id=0)
p = self.target_padding
self.pad_target = F.pad(tt(self.target_img), (0, 0, p, p, p, p))
h, w = self.pad_target.shape[:2]
self.seed = np.zeros([h, w, self.channel_n], np.float64)
self.seed[h // 2, w // 2, 3:] = 1.0
if self.use_pool:
self.pool_size = 1024
self.batch_size = 4
self.pool = CustomPool(self.seed, self.pool_size)
else:
self.batch_size = 1
if self.use_hebb:
self.coefficients_per_synapse = 5
plastic_weights = 3 * self.channel_n * self.hidden_size + self.hidden_size * self.channel_n
self.coeffs_start_interval = 0.001
self.coeffs = np.random.uniform(-self.coeffs_start_interval, self.coeffs_start_interval,
(plastic_weights, self.coefficients_per_synapse))
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
else:
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size)
self.parameters_shape = [tuple(w.shape) for w in self.net.parameters()]
self.log_folder = args.log_folder
logging.basicConfig(filename=self.log_folder + "/logging.txt", format='%(message)s', filemode="w",
level=logging.INFO)
if args.pre_trained != "":
if self.use_hebb:
self.coeffs = np.load(args.pre_trained)
else:
self.load_model(args.pre_trained)
logging.info("lr/(pop*sigma) at start: " + str(self.learning_rate / (self.pop_size * self.sigma)))
# For logging
self.x_range = []
self.y_lin = []
self.avg = []
self.avg_iter = []
self.losses_main = []
self.iter_main = []
t_rgb = to_rgb(self.pad_target).permute(2, 0, 1)
save_image(t_rgb, self.log_folder + "/target_image.png")
def load_model(self, path):
"""Load a PyTorch model from path."""
self.net.load_state_dict(torch.load(path))
self.net.double()
def fitness_shaping(self, x):
"""Sort x and and map x to linear values between -0.5 and 0.5
Return standard score of x
"""
shaped = np.zeros(len(x))
shaped[x.argsort()] = np.arange(len(x), dtype=np.float64)
shaped /= (len(x) - 1)
shaped -= 0.5
shaped = (shaped - shaped.mean()) / shaped.std()
return shaped
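    # Illustrative example (a sketch): for fitnesses x = [0.1, 0.5, 0.3] the rank transform gives
    # [-0.5, 0.5, 0.0], which standardises to roughly [-1.22, 1.22, 0.0]; only the ordering of the
    # raw fitnesses matters, making the parameter updates robust to outlier losses.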
def update_coeffs(self, fitnesses, epsilons):
"""Update parent Hebbian coefficients using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for index, c in enumerate(self.coeffs):
layer_population = np.array([p[index] for p in epsilons])
update_factor = self.learning_rate / (self.pop_size * self.sigma)
self.coeffs[index] = c + update_factor * np.dot(layer_population.T, fitnesses).T
def update_parameters(self, fitnesses, epsilons):
"""Update parent network weights using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for i, e in enumerate(epsilons):
for j, w in enumerate(self.net.parameters()):
w.data += self.learning_rate * 1 / (self.pop_size * self.sigma) * fitnesses[i] * e[j]
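    # Both update rules above implement the same evolution-strategy estimate,
    #   theta <- theta + lr / (pop_size * sigma) * sum_i fitness_i * epsilon_i,
    # applied per Hebbian coefficient matrix in update_coeffs and per weight tensor here.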
def get_population(self, use_seed=None):
"""Return an array with values sampled from N(0, sigma).
The shape of the array is (pop_size, (layer1_size, layer2_size)) using ES and (pop_size, plastic_weights, 5)
"""
if use_seed is not None:
np.random.seed(use_seed)
temp_pop = self.pop_size
if self.is_master:
temp_pop /= self.nodes
eps = []
if self.use_hebb:
layers = self.coeffs
for i in range(int(temp_pop / 2)):
e = []
e2 = []
for w in layers:
j = np.random.randn(*w.shape) * self.sigma
e.append(j)
e2.append(-j)
eps.append(e)
eps.append(e2)
else:
layers = self.parameters_shape
for i in range(int(temp_pop / 2)):
e = []
e2 = []
for w in layers:
j = np.random.randn(*w) * self.sigma
e.append(j)
e2.append(-j)
eps.append(e)
eps.append(e2)
        return np.array(eps, dtype=object)
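    # Descriptive note: perturbations are drawn in antithetic pairs (j, -j), so an even
    # pop_size is assumed; passing use_seed makes the draw reproducible, which lets
    # separate worker processes or machines regenerate identical populations.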
def train_step_hebb(self, model_try, coeffs_try, x):
"""Perform a generation of CA. Initialize a random net and update weights in every update step using
trained coeffs.
Return output x and loss
"""
torch.seed()
losses = torch.zeros(x.shape[0])
for j, x0 in enumerate(x): # Iterate over batch
model_try.apply(weights_init)
model_try.fc1.weight.zero_()
x0 = x0[None, ...]
weights1_2, weights2_3 = list(model_try.parameters())
weights1_2 = weights1_2.detach().numpy()
weights2_3 = weights2_3.detach().numpy()
iter_n = torch.randint(30, 40, (1,)).item() # Episode
for i in range(iter_n):
o0, o1, x0 = model_try(x0)
weights1_2, weights2_3 = hebbian_update(coeffs_try, weights1_2, weights2_3, o0.numpy(),
o1.numpy(), x0.numpy())
(a, b) = (0, 1)
list(model_try.parameters())[a].data /= list(model_try.parameters())[a].__abs__().max()
list(model_try.parameters())[b].data /= list(model_try.parameters())[b].__abs__().max()
list(model_try.parameters())[a].data *= 0.4
list(model_try.parameters())[b].data *= 0.4
loss = model_try.loss_f(x0, self.pad_target)
loss = torch.mean(loss)
losses[j] = loss.item()
x[j] = x0[0, ...]
loss = torch.mean(losses)
return x, loss.item()
def train_step_es(self, model_try, x):
"""Perform a generation of CA using trained net.
Return output x and loss
"""
torch.seed()
iter_n = torch.randint(30, 40, (1,)).item()
for i in range(iter_n): # Episode
x = model_try(x)
loss = self.net.loss_f(x, self.pad_target)
loss = torch.mean(loss)
return x, loss.item()
def get_fitness_hebb(self, epsilon, x0, pid, q=None):
"""Method that start a generation of Hebbian ES.
Return output from generation x and its fitness
"""
model_try = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
torch.seed()
model_try.apply(weights_init)
coeffs_try = self.coeffs.copy()
coeffs_try += epsilon
x, loss = self.train_step_hebb(model_try, coeffs_try, x0.clone())
fitness = -loss
if not math.isfinite(fitness):
raise ValueError('Fitness ' + str(fitness) + '. Loss: ' + str(loss))
if self.use_mp:
q.put((x, fitness, pid))
return
return x, fitness
def get_fitness_es(self, epsilon, x0, pid, q=None):
"""Method that start a generation of ES.
Return output from generation x and its fitness
"""
model_try = copy.deepcopy(self.net)
if epsilon is not None:
for i, w in enumerate(model_try.parameters()):
w.data += torch.tensor(epsilon[i])
x, loss = self.train_step_es(model_try, x0)
fitness = -loss
if not math.isfinite(fitness):
raise ValueError('Encountered non-number value in loss. Fitness ' + str(fitness) + '. Loss: ' + str(loss))
if self.use_mp:
q.put((x, fitness, pid))
return
return x, fitness
def evaluate_main(self, x0):
"""Return output and fitness from a generation using unperturbed weights/coeffs"""
if self.use_hebb:
x_main, loss_main = self.train_step_hebb(self.net, self.coeffs, x0.clone())
fit_main = - loss_main
else:
x_main, loss_main = self.train_step_es(self.net, x0.clone())
fit_main = - loss_main
return x_main, fit_main
def create_plots(self, x_range, y_lin, avg_iter, avg, iter_main, losses_main):
"""Plot population's fitnesses, average fitnesses and main network's fitnesses.
Two plots, one for all iterations so far, and one for the last 100 iterations.
"""
plt.clf()
plt.scatter(x_range, np.log10(y_lin), color="blue", s=0.5)
plt.plot(avg_iter, np.log10(avg), color='pink')
plt.plot(iter_main, np.log10(losses_main), color='red', alpha=0.7)
plt.title("Log-loss for " + self.log_folder)
plt.savefig(self.log_folder + "/log_loss_over_time.png")
if len(x_range) >= 100 * self.pop_size:
# log 10, last 100 iters
plt.clf()
plt.scatter(x_range[-100 * self.pop_size:], np.log10(y_lin[-100 * self.pop_size:]), s=0.5)
plt.plot(avg_iter[-100:], np.log10(avg[-100:]), color='red')
plt.title("Log-loss last 100 for " + self.log_folder)
plt.savefig(self.log_folder + "/log_loss_over_time_last100.png")
def save_data(self, buffer, x_range, y_lin, iter_main, losses_main, iteration):
"""Save raw population and main network fitnesses to a csv file on the format: iteration, fitness"""
if len(x_range) > 0:
points = buffer * self.pop_size
append_file(self.log_folder + '/raw/losses.csv', x_range[-points:], y_lin[-points:])
# this one overwrites
write_file(self.log_folder + '/raw/main_losses.csv', iter_main, losses_main)
if self.use_hebb:
np.save(self.log_folder + "/models/" + str(iteration) + '.npy', self.coeffs)
else:
export_model(self.net, self.log_folder + "/models/saved_model_" + str(iteration) + ".pt")
def log(self, fitnesses, iteration, x0=None, xs=None):
"""Function to add fitnesses to arrays and plot/save data at iteration intervals."""
if x0 is None:
x0 = tt(np.repeat(self.seed[None, ...], self.batch_size, 0))
# Logging/plotting
for k, fit in enumerate(fitnesses):
self.x_range.append(iteration)
self.y_lin.append(-fit)
self.avg.append(-np.average(fitnesses))
self.avg_iter.append(iteration)
# Evaluate main net/coeffs
if iteration % self.log_main_every == 0:
x_main, fit_main = self.evaluate_main(x0.clone())
self.losses_main.append(-fit_main)
self.iter_main.append(iteration)
# Visualize batch and plot points
if iteration % 500 == 0:
                if xs is None:
visualize_batch([x_main], iteration, self.log_folder, nrow=self.batch_size)
else:
selected = xs[np.argmax(fitnesses)]
visualize_batch([x0.clone(), selected, x_main], iteration, self.log_folder, nrow=self.batch_size)
self.create_plots(self.x_range, self.y_lin, self.avg_iter, self.avg, self.iter_main, self.losses_main)
# Save points and weights/coeffs to file
buffer = 1000
if iteration % buffer == 0:
self.save_data(buffer, self.x_range, self.y_lin, self.iter_main, self.losses_main, iteration)
mean_fit = np.mean(fitnesses)
# Decay learning rate
if mean_fit >= -0.03 and self.decay_state == 0:
self.learning_rate *= 0.3
self.decay_state += 1
logging.info("Setting lr to " + str(self.learning_rate) + " at iter " + str(iteration))
elif mean_fit >= -0.01 and self.decay_state == 1:
self.learning_rate *= 0.5
self.decay_state += 1
logging.info("Setting lr to " + str(self.learning_rate) + " at iter " + str(iteration))
print('step: %d, mean fitness: %.3f, best fitness: %.3f' % (iteration, mean_fit, np.max(fitnesses)))
# check = 250
# if (len(self.losses_main) > check//self.log_main_every) and not self.hit_goal:
# mean_main_loss = np.mean(self.losses_main[-(check//self.log_main_every):])
# if mean_main_loss <= 0.001:
# logging.info("Hit goal at " + str(iteration))
# if self.use_hebb:
# np.save(self.log_folder + "/models/" + str(iteration) + "good" + '.npy', self.coeffs)
# else:
# export_model(self.net, self.log_folder + "/models/saved_model_" + str(iteration) + "good" + ".pt")
# self.hit_goal = True
def run_master(self):
"""Send weights/coeffs to worker nodes and poll for results.
Update weights/coeffs when all results are present.
"""
# ticM = time.time()
for iter in range(self.iterations):
# logging.info("Sending weights")
weights_to_send = self.coeffs if self.use_hebb else self.net.state_dict()
self.master.send_weights(weights_to_send)
# logging.info("Waiting for results...")
fitnesses, seeds = self.master.wait_for_results()
# logging.info("Got all results!")
fitnesses = np.array(fitnesses)
eps_seeds = np.array(seeds)
epsilons = []
for seed in eps_seeds:
eps = self.get_population(use_seed=seed)
epsilons.append(eps)
for i, fit in enumerate(fitnesses):
if self.use_hebb:
self.update_coeffs(fit, epsilons[i])
else:
self.update_parameters(fit, epsilons[i])
all_fitnesses = []
for fit in fitnesses:
all_fitnesses.extend(fit)
self.log(all_fitnesses, iter)
# if iter == 999:
# tocM = time.time()
# logging.info("time used in milliseconds: " + str(int((tocM -ticM)*1000)))
def run(self):
"""Start evolution using Hebbian or ES.
        If using multiple nodes, this method listens for weights/coeffs and sends back the results.
        If not, the method will also call the methods that update the parameters and log the results.
        If using a pool, the method will sample x's from the pool before every generation and damage them (if damage is enabled).
"""
# seed
x0 = tt(np.repeat(self.seed[None, ...], self.batch_size, 0))
# Run models once to compile jitted methods.
if self.use_hebb:
model_try = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
_, _ = self.train_step_hebb(model_try, self.coeffs, x0.clone())
else:
_, _ = self.train_step_es(self.net, x0.clone())
if self.use_mp:
processes = []
q = tmp.Manager().Queue()
for iter in range(self.iterations):
if self.use_pool:
batch = self.pool.sample(self.batch_size)
x0 = batch["x"]
loss_rank = self.net.loss_f(tt(x0), self.pad_target).numpy().argsort()[::-1]
x0 = x0[loss_rank]
x0[:1] = self.seed
if self.damage:
for i in range(self.damage):
x0[-(i+1)] = dmg(x0[-(i+1)], self.new_size, only_bottom=True)
x0 = tt(x0)
if self.cross_machine:
if self.use_hebb:
self.coeffs = self.worker.poll_weights()
else:
weights = self.worker.poll_weights()
self.net.load_state_dict(weights)
eps_seed = np.random.randint(0, 2**32-1)
epsilons = self.get_population(use_seed=eps_seed)
else:
epsilons = self.get_population()
fitnesses = np.zeros((self.pop_size), dtype=np.float64)
xs = torch.zeros(self.pop_size, *x0.shape, dtype=torch.float64)
for i in range(self.pop_size):
if self.use_hebb:
if self.use_mp:
p = tmp.Process(target=self.get_fitness_hebb, args=(np.array(epsilons[i], dtype=np.float64), x0.clone(), i, q))
processes.append(p)
else:
x, fit = self.get_fitness_hebb(np.array(epsilons[i], dtype=np.float64), x0.clone(), i)
else:
if self.use_mp:
p = tmp.Process(target=self.get_fitness_es, args=(epsilons[i], x0.clone(), i, q))
processes.append(p)
else:
x, fit = self.get_fitness_es(epsilons[i], x0.clone(), i)
if not self.use_mp:
fitnesses[i] = fit
xs[i] = x
if self.use_mp:
for p in processes:
p.start()
for p in processes:
p.join()
x, fit, pid = q.get()
fitnesses[pid] = fit
xs[pid] = x
processes = []
if not q.empty():
print("Queue not empty")
if self.use_pool:
idx = np.argmax(fitnesses)
batch["x"][:] = xs[idx]
self.pool.commit(batch)
fitnesses = np.array(fitnesses).astype(np.float64)
if self.cross_machine:
self.worker.send_result(fitnesses, eps_seed)
else:
if self.use_hebb:
self.update_coeffs(fitnesses, epsilons)
else:
self.update_parameters(fitnesses, epsilons)
self.log(fitnesses, iter, x0=x0, xs=xs)
def weights_init(m):
"""Initialize a network's weights with uniform distributed values.
Used for Hebbian ES.
"""
if isinstance(m, torch.nn.Linear) and m.in_features != HIDDEN_SIZE:
torch.nn.init.uniform_(m.weight.data, -0.1, 0.1)
```
#### File: ca-es/src/dist.py
```python
import numpy as np
import pickle
import random
import redis
import time
RESULTS_KEY = "results"
HOST = '10.100.10.10'
PORT = 6379
DB = 0
PASSWORD = ""
def serialize(x):
"""Return a pickled object."""
return pickle.dumps(x)
def deserialize(x):
"""Return a depickled object."""
return pickle.loads(x)
class Master(object):
"""Master that sends weights/coefficients to workers and waits for results from them."""
def __init__(self, nodes):
self.r = redis.Redis(host=HOST, port=PORT, db=DB, password=PASSWORD)
self.run_id = 1
self.nodes = nodes
for key in self.r.scan_iter():
print("deleting key", key)
self.r.delete(key)
def wait_for_results(self):
"""Wait for all workers to send fitnesses and seed to redis."""
rewards = []
seeds = []
returnert = 0
while returnert < self.nodes:
_, res = self.r.blpop(RESULTS_KEY)
rew, seed = deserialize(res)
rewards.append(rew)
seeds.append(seed)
returnert += 1
time.sleep(0.01)
return rewards, seeds
def send_weights(self, weights):
"""Send weights/coefficients to redis."""
self.r.set("weights", serialize(weights))
self.r.set("run_id", serialize(self.run_id))
self.run_id += 1
class Worker(object):
"""Wait for weights/coefficients from master and return fitnesses and seed."""
def __init__(self, run_id):
self.r = redis.Redis(host=HOST, port=PORT, db=DB, password=PASSWORD)
self.run_id = run_id
def poll_weights(self):
"""Wait for new weights/coefficients in redis."""
while True:
new_run_id = deserialize(self.r.get("run_id"))
time.sleep(0.1)
if new_run_id != self.run_id:
break
self.run_id = new_run_id
weights = deserialize(self.r.get("weights"))
return weights
def send_result(self, rew, seed):
"""Put fitnesses and seed in redis."""
self.r.rpush(RESULTS_KEY, serialize((rew, seed)))
```
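For orientation, a minimal sketch of how the `Master` and `Worker` classes above are meant to interact; the import path, node count and payloads are assumptions, not values taken from the repository:

```python
from dist import Master, Worker  # assuming src/ is on the import path

def master_side():
    # Expect results from a single worker node; any picklable object can be sent.
    master = Master(nodes=1)
    master.send_weights({"step": 0})
    rewards, seeds = master.wait_for_results()
    return rewards, seeds

def worker_side():
    worker = Worker(run_id=0)
    weights = worker.poll_weights()   # blocks until a new run_id is published
    fitnesses, seed = [0.0], 12345    # placeholder results
    worker.send_result(fitnesses, seed)
```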
#### File: ca-es/src/net_modeltest.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numba import njit
from numpy.random import default_rng
"""
This file is very similar to net.py, with minor changes to return more information.
See the comments on net.py for method descriptions.
"""
def to_rgba(x):
return x[..., :4]
def get_living_mask(x):
alpha = x[:, :, :, 3:4]
m = F.max_pool3d(alpha, kernel_size=3, stride=1, padding=1) > 0.1
return m
class CAModel(nn.Module):
def __init__(self, channel_n, fire_rate, hidden_size, new_size_pad, batch_size, disable_grad=True, use_hebb=False):
super(CAModel, self).__init__()
self.channel_n = channel_n
self.fire_rate = fire_rate
self.hidden_size = hidden_size
self.new_size_pad = new_size_pad
self.batch_size = batch_size
self.use_hebb = use_hebb
self.fc0 = nn.Linear(self.channel_n * 3, self.hidden_size, bias=False)
self.fc1 = nn.Linear(self.hidden_size, self.channel_n, bias=False)
with torch.no_grad():
self.fc1.weight.zero_()
identify = np.float64([0, 1, 0])
identify = np.outer(identify, identify)
identify = torch.from_numpy(identify)
dx = np.outer([1, 2, 1], [-1, 0, 1]) / 8.0 # Sobel filter
dx = torch.from_numpy(dx)
dy = dx.T
self.kernel = torch.cat([identify[None, None, ...], dx[None, None, ...], dy[None, None, ...]], dim=0).repeat(self.channel_n, 1, 1, 1)
if disable_grad:
for param in self.parameters():
param.requires_grad = False
else:
for param in self.parameters():
param.requires_grad = True
self.double()
def perceive(self, x):
x = F.conv2d(x.permute(0, 3, 1, 2), self.kernel, groups=16, padding=1)
x = x.permute(0, 2, 3, 1)
return x
def forward(self, x, fire_rate=None, step_size=1.0):
pre_life_mask = get_living_mask(x)
x_old = x.detach().clone()
x = self.perceive(x)
if self.use_hebb:
y = x.detach().clone()
x = self.fc0(x)
x = F.relu(x)
if self.use_hebb:
dx1 = x.detach().clone()
x = self.fc1(x)
x = x * step_size
dx = x.detach().clone()
if fire_rate is None:
fire_rate = self.fire_rate
update_mask_rand = torch.rand(*x[:, :, :, :1].shape)
update_mask = update_mask_rand <= fire_rate
x = x_old + x * update_mask.double()
post_life_mask = get_living_mask(x)
life_mask = pre_life_mask.bool() & post_life_mask.bool()
x = x * life_mask.double()
x = x.reshape(self.batch_size, self.new_size_pad, self.new_size_pad, self.channel_n)
if self.use_hebb:
return y, dx1, x, dx
else:
return x, dx
def loss_f(self, x, y):
return torch.mean(torch.square(x[..., :4] - y), [-2, -3, -1])
``` |
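As a rough usage sketch of the test model above: all sizes are illustrative assumptions, `channel_n=16` matches the hard-coded `groups=16` in `perceive`, and the module is assumed to be importable from `src/`:

```python
import torch
from net_modeltest import CAModel  # assuming src/ is on the import path

batch, size, channels = 4, 40, 16
model = CAModel(channel_n=channels, fire_rate=0.5, hidden_size=32,
                new_size_pad=size, batch_size=batch, disable_grad=True)

# Seed state: a single "living" cell in the centre (alpha and hidden channels set to 1).
x = torch.zeros((batch, size, size, channels), dtype=torch.float64)
x[:, size // 2, size // 2, 3:] = 1.0

for _ in range(10):
    x, dx = model(x)  # with use_hebb=False the forward pass returns (x, dx)
```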
{
"source": "jobregon1212/rulecosi",
"score": 2
} |
#### File: rulecosi/rulecosi/helpers.py
```python
from rulecosi.rules import RuleSet, Rule, Condition
from bitarray import bitarray
def not_exists_add(element, set_):
if element not in set_:
set_.add(element)
return False
else:
return True
def remove_duplicated_rules(list_of_rulesets, weights=None):
x_set = set()
new_list_rulesets = []
if weights is not None:
new_weights = []
else:
new_weights = None
for idx, ruleset in enumerate(list_of_rulesets):
filtered_rules = [x for x in ruleset if not not_exists_add(x, x_set)]
if len(filtered_rules) > 0:
new_list_rulesets.append(RuleSet(filtered_rules, ruleset.condition_map))
if weights is not None:
new_weights.append(weights[idx])
return new_list_rulesets, new_weights, x_set
def order_trees_by_weight(list_of_rulesets, weights):
ordered_tuples = [(y, x) for y, x in sorted(zip(weights, list_of_rulesets), reverse=True)]
return map(list, zip(*ordered_tuples))
def total_n_rules(list_of_rulesets):
return sum([len(ruleset.get_rule_list()) for ruleset in list_of_rulesets])
def zero_bitarray(size):
b_array = bitarray(size)
b_array.setall(False)
return b_array
def one_bitarray(size):
b_array = bitarray(size)
b_array.setall(True)
return b_array
def list_and_operation(list_):
return_set = list_[0]
for i in range(1, len(list_)):
return_set = return_set & list_[i]
return return_set
def list_or_operation(list_):
return_set = list_[0]
for i in range(1, len(list_)):
return_set = return_set | list_[i]
return return_set
def count_rules_conds(ruleset):
total_cond = 0
for rule in ruleset:
total_cond += len(rule.A)
return len(ruleset.rules), total_cond
# https://stackoverflow.com/questions/54699105/how-to-count-the-number-of-occurrences-of-a-nested-dictionary-key
def count_keys(dict_, key):
return (key in dict_) + sum(count_keys(v, key) for v in dict_.values() if isinstance(v, dict))
```
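A tiny illustration of the bitarray helpers above; the values are made up and the import path follows the package layout:

```python
from bitarray import bitarray
from rulecosi.helpers import (list_and_operation, list_or_operation,
                              one_bitarray, zero_bitarray)

a = bitarray('1101')
b = bitarray('1011')
assert list_and_operation([a, b]) == bitarray('1001')  # element-wise AND
assert list_or_operation([a, b]) == bitarray('1111')   # element-wise OR
assert zero_bitarray(4) == bitarray('0000')
assert one_bitarray(4) == bitarray('1111')
```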
#### File: rulecosi/rulecosi/rule_heuristics.py
```python
import operator
import copy
from functools import reduce
from bitarray import bitarray
from .helpers import one_bitarray
class RuleHeuristics:
""" This class controls the computation of heuristics of the rules.
For fast computation we use the bitarray class. At the beginning, an N-size
bitarray for each condition is computed, with N=n_samples. This array
    contains 1 if the record satisfies the condition and 0 otherwise.
    When a combination is performed, these bitarrays are combined using the
    set-intersection operation to find out how many records are covered by the
    new rule (which is a combination of conditions). Additionally, there are two
    extra bitarrays, one covering each of the classes (right now only binary
    classification is supported). The cardinalities of all these bitarrays are
    used to compute the coverage and confidence of the rules very quickly.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values. An array of int.
condition_map: dictionary of <condition_id, Condition>, default=None
Dictionary of Conditions extracted from all the ensembles. condition_id
is an integer uniquely identifying the Condition.
classes : ndarray, shape (n_classes,)
The classes seen in the ensemble fit method.
cov_threshold: float, default=0.0
Coverage threshold of a rule to be considered for further combinations.
The greater the value the more rules are discarded. Default value is
0.0, which it only discards rules with null coverage.
conf_threshold: float, default=0.5
Confidence or rule accuracy threshold of a rule to be considered for
further combinations. The greater the value, the more rules are
discarded. Rules with high confidence are accurate rules. Default value
is 0.5, which represents rules with higher than random guessing
accuracy.
min_samples: int, default=1
    The minimum number of samples required to be at a rule in the simplified
ruleset.
"""
def __init__(self, X, y, classes_, condition_map,
cov_threshold=0.0, conf_threshold=0.5, min_samples=1):
self.X = X
self.y = y
self.classes_ = classes_
self.condition_map = condition_map
self.cov_threshold = cov_threshold
self.conf_threshold = conf_threshold
self.min_samples = min_samples
self.training_bit_sets = None
self._cond_cov_dict = None
def get_conditions_heuristics(self, conditions, uncovered_mask=None):
""" Compute the heuristics of the combination of conditions using the
bitsets of each condition from the training set. An intersection
operation is made and the cardinality of the resultant set is used
for computing the heuristics
:param conditions: set of conditions' id
:param uncovered_mask: if different than None, mask out the records that
are already covered from the training set. Default is None.
:return: a dictionary with the following keys and form
- cov_set : array of bitsets representing the coverage by class
and total coverage
- cov: the coverage of the conditions
- conf: array of the confidence values of the conditions by
class
- supp: array of the support values of the conditions by class
"""
heuristics_dict = self.create_empty_heuristics()
if len(conditions) == 0:
return heuristics_dict
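        # AND together the per-condition coverage bitarrays for each class, then
        # OR the per-class results to obtain the overall coverage of the combination.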
b_array_conds = [reduce(operator.and_,
[self._cond_cov_dict[i][cond] for cond in
conditions])
for i in range(len(self.classes_))]
        b_array_conds.append(reduce(operator.or_, b_array_conds))
if uncovered_mask is not None:
b_array_conds = [b_array_measure & uncovered_mask for
b_array_measure in b_array_conds]
updated_mask = ~b_array_conds[-1] & uncovered_mask
uncovered_mask.clear()
uncovered_mask.extend(updated_mask)
cov_count = b_array_conds[-1].count()
if cov_count == 0:
return heuristics_dict
class_cov_count = [b_array_conds[i].count() for i in
range(len(self.classes_))]
coverage = cov_count / self.X.shape[0]
heuristics_dict['cov_set'] = b_array_conds
heuristics_dict['cov'] = coverage
heuristics_dict['conf'] = [class_count / cov_count for class_count in
class_cov_count]
heuristics_dict['supp'] = [class_count / self.X.shape[0] for class_count
in class_cov_count]
return heuristics_dict
def compute_rule_heuristics(self, ruleset, uncovered_mask=None,
sequential_coverage=False):
""" Compute rule heuristics, but without the sequential_coverage
parameter, and without removing the rules that do not meet the
thresholds
:param ruleset: RuleSet object representing a ruleset
:param uncovered_mask: if different than None, mask out the records that
are already covered from the training set. Default is None.
        :param sequential_coverage: If true, the examples covered by a rule are
            removed from the training set and any rule that does not meet the
            thresholds is discarded. If false, the heuristics are computed for
            all the rules using all the records in the training set. Default
            is False
"""
if uncovered_mask is None:
uncovered_mask = one_bitarray(self.X.shape[0])
if sequential_coverage:
ruleset.rules[:] = [rule for rule in ruleset if
self.rule_is_accurate(rule, uncovered_mask)]
else:
for rule in ruleset:
local_uncovered_instances = copy.copy(uncovered_mask)
heuristics_dict = self.get_conditions_heuristics(rule.A,
uncovered_mask=local_uncovered_instances)
rule.set_heuristics(heuristics_dict)
def _compute_training_bit_sets(self):
""" Compute the bitsets of the coverage for the prior class distribution
of the ensemble according to the training set
"""
return [bitarray((self.y == self.classes_[i]).astype(int).tolist()) for
i in range(len(self.classes_))]
def _compute_condition_bit_sets(self):
""" Compute the bitsets of the coverage of every condition contained in
the ensemble according to the training set
"""
# empty sets for each condition coverage class
cond_cov_dict = [{} for _ in range(len(self.classes_) + 1)]
for cond_id, cond in self.condition_map.items():
# compute bitarray for the covered records in X_ by condition cond
cond_coverage_bitarray = bitarray(
cond.satisfies_array(self.X).astype(int).tolist())
# create the entries in the dictionary
for i in range(len(self.classes_)):
cond_cov_dict[i][cond_id] = cond_coverage_bitarray & \
self.training_bit_sets[i]
cond_cov_dict[-1][cond_id] = cond_coverage_bitarray
return cond_cov_dict
def initialize_sets(self):
""" Initialize the sets that are going to be used during the combination
        and simplification process. This includes the bitsets for the training
data as well as the bitsets for each of the conditions
"""
self.training_bit_sets = self._compute_training_bit_sets()
self._cond_cov_dict = self._compute_condition_bit_sets()
def rule_is_accurate(self, rule, uncovered_instances):
""" Determine if a rule meet the coverage and confidence thresholds
:param rule: a Rule object
:param uncovered_instances: mask out the records that are already
covered from the training set.
:return: boolean indicating if the rule satisfy the thresholds
"""
if uncovered_instances.count() == 0:
return False
local_uncovered_instances = copy.copy(uncovered_instances)
heuristics_dict = self.get_conditions_heuristics(rule.A,
uncovered_mask=local_uncovered_instances)
rule.set_heuristics(heuristics_dict)
if rule.cov > self.cov_threshold and rule.conf > self.conf_threshold:
uncovered_instances.clear()
uncovered_instances.extend(local_uncovered_instances)
return True
else:
return False
def create_empty_heuristics(self):
""" Create an empty dictionary for the heuristics to be computed.
:return: a dictionary with the heuristics to be computed and populated
"""
return {'cov_set': None,
'cov': 0.0,
'conf': [0.0 for _ in self.classes_],
'supp': [0.0 for _ in self.classes_]}
```
#### File: rulecosi/tests/test_rulecosi.py
```python
import pytest
from sklearn.datasets import load_iris
from rulecosi import RuleCOSIClassifier
@pytest.fixture
def data():
return load_iris(return_X_y=True)
def test_template_classifier(data):
X, y = data
clf = RuleCOSIClassifier()
assert clf.demo_param == 'demo'
clf.fit(X, y)
assert hasattr(clf, 'classes_')
assert hasattr(clf, 'X_')
assert hasattr(clf, 'y_')
y_pred = clf.predict(X)
assert y_pred.shape == (X.shape[0],)
``` |
{
"source": "jobreit/COMP0090",
"score": 2
} |
#### File: tutorials/img_sgm/utils_tf.py
```python
import tensorflow as tf
@tf.function
def dice_loss(ps,ts):
return - dice_score(ps,ts)
def dice_binary(ps,ts):
ps = tf.cast(ps>=.5,dtype=ps.dtype)
ts = tf.cast(ts>=.5,dtype=ts.dtype)
return dice_score(ps,ts)
def dice_score(ps,ts,eps=1e-7):
numerator = tf.reduce_sum(ts*ps,axis=[1,2,3])*2 + eps
denominator = tf.reduce_sum(ts,axis=[1,2,3]) + tf.reduce_sum(ps,axis=[1,2,3]) + eps
return numerator/denominator
@tf.function
def pre_process(images, labels):
images = tf.cast(tf.stack(images), dtype=tf.float32)
labels = tf.cast(tf.expand_dims(tf.stack(labels),axis=3), dtype=tf.float32)
return images, labels
## for data augmentation
def get_reference_grid(grid_size):
# grid_size: [batch, height, width]
grid = tf.cast(tf.stack(tf.meshgrid(
tf.range(grid_size[1]),
tf.range(grid_size[2]),
indexing='ij'), axis=2), dtype=tf.float32)
return tf.tile(tf.expand_dims(grid, axis=0), [grid_size[0],1,1,1])
def random_transform_generator(batch, corner_scale=.2):
    # right-multiplication affine
ori_corners = tf.tile([[[1.,1.], [1.,-1.], [-1.,1.], [-1.,-1.]]], [batch,1,1])
new_corners = ori_corners + tf.random.uniform([batch,4,2], -corner_scale, corner_scale)
ori_corners = tf.concat([ori_corners,tf.ones([batch,4,1])], axis=2)
new_corners = tf.concat([new_corners,tf.ones([batch,4,1])], axis=2)
return tf.stack([tf.linalg.lstsq(ori_corners[n],new_corners[n]) for n in range(batch)], axis=0)
def warp_grid(grid, transform):
# grid: [batch, height, width, 2]
# transform: [batch, 3, 3]
batch, height, width = grid.shape[0:3]
grid = tf.concat([tf.reshape(grid,[batch,height*width,2]),
tf.ones([batch,height*width,1])], axis=2)
grid_warped = tf.matmul(grid, transform)
return tf.reshape(grid_warped[...,:2], [batch,height,width,2])
@tf.function
def resample_linear(grid_data, sample_grids):
# grid_data: [batch, height, width]
# sample_grids: [batch, height, width, 2]
batch, height, width = (grid_data.shape[:])
sample_coords = tf.reshape(sample_grids, [batch,-1,2])
# pad to replicate the boundaries 1-ceiling, 2-floor
sample_coords = tf.stack([tf.clip_by_value(sample_coords[...,0],0,height-1),
tf.clip_by_value(sample_coords[...,1],0,width-1)], axis=2)
i1 = tf.cast(tf.math.ceil(sample_coords[...,0]), dtype=tf.int32)
j1 = tf.cast(tf.math.ceil(sample_coords[...,1]), dtype=tf.int32)
i0 = tf.maximum(i1-1, 0)
j0 = tf.maximum(j1-1, 0)
# four data points q_ij
q00 = tf.gather_nd(grid_data,tf.stack([i0,j0],axis=2), batch_dims=1)
q01 = tf.gather_nd(grid_data,tf.stack([i0,j1],axis=2), batch_dims=1)
q11 = tf.gather_nd(grid_data,tf.stack([i1,j1],axis=2), batch_dims=1)
q10 = tf.gather_nd(grid_data,tf.stack([i1,j0],axis=2), batch_dims=1)
# weights with normalised local coordinates
wi1 = sample_coords[...,0] - tf.cast(i0,dtype=tf.float32)
wi0 = 1 - wi1
wj1 = sample_coords[...,1] - tf.cast(j0,dtype=tf.float32)
wj0 = 1 - wj1
return tf.reshape(q00*wi0*wj0 + q01*wi0*wj1 + q11*wi1*wj1 + q10*wi1*wj0, [batch,height,width])
@tf.function
def random_image_label_transform(images, labels, add_dim=True):
images, labels = tf.squeeze(images), tf.squeeze(labels) #avoid channel confusion
# images: [batch, height, width]
reference_grid = get_reference_grid(images.shape[0:3])
random_transform = random_transform_generator(images.shape[0], corner_scale=0.2)
sample_grids = warp_grid(reference_grid, random_transform)
images, labels = resample_linear(images, sample_grids), resample_linear(labels, sample_grids)
if add_dim:
images, labels = tf.expand_dims(images,axis=3), tf.expand_dims(labels,axis=3)
return images, labels
@tf.function
def random_image_transform(images):
# images: [batch, height, width]
reference_grid = get_reference_grid(images.shape[0:3])
random_transform = random_transform_generator(images.shape[0], corner_scale=0.2)
sample_grids = warp_grid(reference_grid, random_transform)
return resample_linear(images, sample_grids)
``` |
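A short usage sketch of the augmentation helpers above; the shapes, sizes and the import are illustrative assumptions:

```python
import tensorflow as tf
from utils_tf import dice_score, random_image_label_transform  # assuming this module is importable

# Illustrative batch of images and binary masks: [batch, height, width, 1].
images = tf.random.uniform([4, 64, 64, 1])
labels = tf.cast(tf.random.uniform([4, 64, 64, 1]) > 0.5, tf.float32)

# Apply the same random affine warp to images and labels.
aug_images, aug_labels = random_image_label_transform(images, labels)

# Dice score of a mask with itself is 1.0 for every sample in the batch.
print(dice_score(labels, labels))
```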
{
"source": "job/rpki-client-web",
"score": 2
} |
#### File: rpki-client-web/rpkiclientweb/__main__.py
```python
import argparse
import asyncio
import logging
import os
import sys
from yaml import Loader, dump, load
from .web import RpkiClientWeb
LOG = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser("rpki-client wrapper")
parser.add_argument(
"-c", "--config", default="config.yml", type=argparse.FileType("r")
)
parser.add_argument("-v", "--verbose", action="count", default=0)
# -1: interval from config, 0 = zero delay
parser.add_argument(
"-j",
"--jitter",
default=0 if os.isatty(sys.stdout.fileno()) else 600,
type=int,
help="random delay of up to [jitter] before starting rpki-client for the first time. Defaults to 0 when in an interactive terminal, 600 when non-interactive.",
)
args = parser.parse_args()
logging.basicConfig(handlers=[logging.StreamHandler(sys.stdout)])
if args.verbose > 0:
logging.getLogger().setLevel(logging.DEBUG)
# Only log rpki-client output when very verbose.
level = logging.INFO
if args.verbose > 1:
level = logging.DEBUG
logging.getLogger("rpkiclientweb.rpki_client").setLevel(level)
else:
logging.getLogger().setLevel(logging.INFO)
conf = load(args.config, Loader=Loader)
conf["jitter"] = args.jitter
LOG.debug("Configuration: %s", conf)
web = RpkiClientWeb(conf)
asyncio.run(web.run())
return 0
if __name__ == "__main__":
sys.exit(main())
```
#### File: rpki-client-web/rpkiclientweb/rpki_client.py
```python
import asyncio
import itertools
import json
import logging
import os.path
import time
from dataclasses import dataclass, field
from typing import FrozenSet, List, Optional
from prometheus_async.aio import time as time_metric, track_inprogress
from prometheus_client import Counter, Gauge, Histogram
from rpkiclientweb.outputparser import (
OutputParser,
WarningSummary,
missing_labels,
)
LOG = logging.getLogger(__name__)
OUTPUT_BUFFER_SIZE = 8_388_608
RPKI_CLIENT_DURATION = Histogram(
"rpkiclient_duration_seconds",
"Time spent calling rpki-client",
buckets=[1, 3, 6, 12, 18, 24, 30, 44, 60, 72, 84, 96, 108, 120, 150, 180, 240, 300],
)
RPKI_CLIENT_LAST_DURATION = Gauge(
"rpkiclient_last_duration_seconds",
"Duration of the last call to rpki-client",
)
RPKI_CLIENT_LAST_UPDATE = Gauge(
"rpkiclient_last_update",
"Timestamp of the last successful call to rpki-client",
)
RPKI_CLIENT_UPDATE_COUNT = Counter(
"rpkiclient_update", "Number of rpki-client updates", ["returncode"]
)
RPKI_CLIENT_RUNNING = Gauge(
"rpkiclient_running", "Number of running rpki-client instances"
)
RPKI_OBJECTS_COUNT = Gauge("rpki_objects", "Number of objects by type", ["type"])
RPKI_CLIENT_WARNINGS = Gauge(
"rpkiclient_warnings", "Warnings from rpki-client", ["hostname", "type"]
)
RPKI_CLIENT_PULLING = Gauge(
"rpkiclient_pulling",
"Last time pulling from this repository was started (referenced).",
["uri"],
)
RPKI_CLIENT_PULLED = Gauge(
"rpkiclient_pulled",
"Last time repo was pulled (before process ended due to timeout).",
["uri"],
)
RPKI_CLIENT_REMOVED_UNREFERENCED = Counter(
"rpkiclient_removed_unreferenced",
"Number of removals of repositories that were no longer referenced.",
)
METADATA_LABELS = (
"elapsedtime",
"usertime",
"systemtime",
"roas",
"failedroas",
"invalidroas",
"certificates",
"failcertificates",
"invalidcertificates",
"manifests",
"failedmanifests",
"stalemanifests",
"crls",
"repositories",
"vrps",
"uniquevrps",
)
OPTIONAL_METADATA_LABELS = frozenset(
[
"failedroas",
"invalidroas",
"failcertificates",
"invalidcertificates",
"stalemanifests",
]
)
@dataclass
class ExecutionResult:
returncode: int
stdout: str
stderr: str
duration: float
@dataclass
class RpkiClient:
"""Maps onto the config.yml"""
rpki_client: str
cache_dir: str
output_dir: str
trust_anchor_locators: List[str] = field(default_factory=list)
timeout: Optional[int] = None
warnings: List[WarningSummary] = field(default_factory=list)
    last_update_repos: FrozenSet[str] = frozenset()
@property
def args(self) -> List[str]:
if not os.path.isfile(self.rpki_client):
raise ValueError(f"rpki_client: '{self.rpki_client}' does not exist")
if not os.path.isdir(self.cache_dir):
raise ValueError(f"cache_dir: '{self.cache_dir}' is not a directory.")
if not os.path.isdir(self.output_dir):
raise ValueError(f"output_dir: '{self.output_dir}' is not a directory.")
if not (not self.timeout or self.timeout >= -1):
raise ValueError(f"illegal timeout: {self.timeout} -- should be >= -1")
# Not using `-s [timeout]` for now because the timeout is managed from
# this wrapping code.
args = [
"-v", # verbose
"-j", # JSON output
"-d",
os.path.abspath(self.cache_dir),
]
for tal in zip(itertools.repeat("-t"), self.trust_anchor_locators):
args.extend(tal)
args.append(os.path.abspath(self.output_dir))
return args
@track_inprogress(RPKI_CLIENT_RUNNING)
@time_metric(RPKI_CLIENT_DURATION)
async def run(self) -> ExecutionResult:
LOG.info("executing %s %s", self.rpki_client, " ".join(self.args))
t0 = time.monotonic()
proc = await asyncio.create_subprocess_exec(
self.rpki_client,
*self.args,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
limit=OUTPUT_BUFFER_SIZE,
)
try:
            if self.timeout is not None and self.timeout > 0:
await asyncio.wait_for(proc.wait(), self.timeout)
else:
await proc.wait()
except asyncio.TimeoutError:
LOG.error("timeout (%ds): killing %d", self.timeout, proc.pid)
proc.kill()
stdout, stderr = await proc.communicate()
duration = time.monotonic() - t0
LOG.info(
"[%d] exited with %d in %f seconds", proc.pid, proc.returncode, duration
)
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("stdout: %s", stdout)
LOG.debug("stderr: %s", stderr)
RPKI_CLIENT_UPDATE_COUNT.labels(returncode=proc.returncode).inc()
RPKI_CLIENT_LAST_DURATION.set(duration)
self.update_warning_metrics(stderr, proc.returncode == 0)
asyncio.create_task(self.update_validated_objects_gauge(proc.returncode))
return ExecutionResult(
returncode=proc.returncode,
stdout=stdout.decode(),
stderr=stderr.decode(),
duration=duration,
)
def update_warning_metrics(self, stderr: bytes, was_successful_run: bool) -> None:
"""Update the warning gauges."""
parsed = OutputParser(stderr.decode("utf8"))
# Delete labels for repos not included anymore (unreferenced)
new_pulling = parsed.pulling
if was_successful_run:
for unreferenced_repo in self.last_update_repos - new_pulling:
LOG.info("Removing unreferenced repository %s", unreferenced_repo)
RPKI_CLIENT_REMOVED_UNREFERENCED.inc()
try:
RPKI_CLIENT_PULLING.remove(unreferenced_repo)
RPKI_CLIENT_PULLED.remove(unreferenced_repo)
except KeyError:
pass
# Update pulling & pulled
for repo in new_pulling:
RPKI_CLIENT_PULLING.labels(repo).set_to_current_time()
for repo in parsed.pulled:
RPKI_CLIENT_PULLED.labels(repo).set_to_current_time()
RPKI_OBJECTS_COUNT.labels(type="files_removed").set(parsed.files_removed)
RPKI_OBJECTS_COUNT.labels(type="vanished_files").set(len(parsed.vanished_files))
RPKI_OBJECTS_COUNT.labels(type="vanished_directories").set(
len(parsed.vanished_directories)
)
new_warnings = parsed.statistics_by_host()
# Set 'missing' metric-label values to 0 since missing values are
# confusing (they disappear in prometheus and grafana)
for missing in missing_labels(self.warnings, new_warnings):
RPKI_CLIENT_WARNINGS.labels(
type=missing.warning_type, hostname=missing.hostname
).set(0)
# Set new values
for warning in new_warnings:
RPKI_CLIENT_WARNINGS.labels(
type=warning.warning_type, hostname=warning.hostname
).set(warning.count)
# And store
self.warnings = new_warnings
self.last_update_repos = new_pulling
async def update_validated_objects_gauge(self, returncode: int) -> None:
"""
Get statistics from `.metadata` of validated objects. Example output:
```
{
"buildmachine": "localhost.localdomain",
"buildtime": "2020-05-28T09:45:59Z",
"elapsedtime": "223",
"usertime": "46",
"systemtime": "57",
"roas": 16245,
"failedroas": 0,
"invalidroas": 0,
"certificates": 11835,
"failcertificates": 0,
"invalidcertificates": 0,
"tals": 1,
"talfiles": "/etc/pki/tals/ripe.tal",
"manifests": 11835,
"failedmanifests": 2,
"stalemanifests": 0,
"crls": 11833,
"repositories": 13,
"vrps": 87978,
"uniquevrps": 87978
}
```
"""
json_path = os.path.join(self.output_dir, "json")
if not os.path.isfile(json_path):
LOG.warning("json output file (%s) is missing", json_path)
return
with open(json_path, "r") as json_res:
metadata = json.load(json_res)["metadata"]
missing_keys = set()
for key in METADATA_LABELS:
value = metadata.get(key, None)
RPKI_OBJECTS_COUNT.labels(type=key).set(value)
if key not in OPTIONAL_METADATA_LABELS and value is None:
missing_keys.add(key)
if missing_keys:
LOG.info(
"keys (%s) missing in json .metadata (%s)",
", ".join(missing_keys),
json.dumps(metadata),
)
if returncode == 0:
RPKI_CLIENT_LAST_UPDATE.set_to_current_time()
``` |
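For orientation, a hedged sketch of driving the wrapper above directly; in the project it is normally driven by the web component, and all paths below are placeholders:

```python
import asyncio
from rpkiclientweb.rpki_client import RpkiClient

# Placeholder paths: point these at a real rpki-client binary, cache/output
# directories and TAL file before running.
client = RpkiClient(
    rpki_client="/usr/sbin/rpki-client",
    cache_dir="/var/cache/rpki-client",
    output_dir="/var/lib/rpki-client",
    trust_anchor_locators=["/etc/pki/tals/example.tal"],
    timeout=600,
)

result = asyncio.run(client.run())
print(result.returncode, result.duration)
```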
{
"source": "job/rpkimancer",
"score": 2
} |
#### File: rpkimancer/cert/ee.py
```python
from __future__ import annotations
import logging
import typing
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import padding
from .base import BaseResourceCertificate, ManifestEntryInfo
from .ca import CertificateAuthority
from .oid import SIA_OBJ_ACCESS_OID
if typing.TYPE_CHECKING:
from ..sigobj import SignedObject
log = logging.getLogger(__name__)
class EECertificate(BaseResourceCertificate):
"""RPKI EE Certificate - RFC6487."""
def __init__(self, *,
signed_object: SignedObject,
**kwargs: typing.Any) -> None:
"""Initialise the EE Certificate."""
self._signed_object = signed_object
common_name = signed_object.econtent.signed_attrs_digest()
super().__init__(common_name=common_name, **kwargs)
@property
def signed_object(self) -> SignedObject:
"""Get the SignedObject that this certificate signs."""
return self._signed_object
@property
def issuer_repo_path(self) -> str:
"""Get the filesystem path to the the issuer publication point."""
return typing.cast(CertificateAuthority, self.issuer).repo_path
@property
def mft_entry(self) -> typing.Optional[ManifestEntryInfo]:
"""Get an entry for inclusion in the issuer's manifest."""
return (self.signed_object.file_name,
self.signed_object.to_der())
@property
def sia(self) -> typing.Optional[x509.SubjectInformationAccess]:
"""Get the SubjectInformationAccess extension for the certificate."""
sia_obj_uri = f"{self.base_uri}/" \
f"{self.issuer_repo_path}/" \
f"{self.signed_object.file_name}"
sia = x509.SubjectInformationAccess([
x509.AccessDescription(SIA_OBJ_ACCESS_OID,
x509.UniformResourceIdentifier(sia_obj_uri)), # noqa: E501
])
return sia
def sign_object(self) -> bytes:
"""Construct a signature over the signedAttrs of the SignedObject."""
message = self.signed_object.econtent.signed_attrs().to_der()
signature = self.private_key.sign(data=message,
padding=padding.PKCS1v15(),
algorithm=self.HASH_ALGORITHM)
return signature
def publish(self, *, pub_path: str, **kwargs: typing.Any) -> None:
"""Publish the SignedObject artifact as a DER file in the PP."""
self.signed_object.publish(pub_path=pub_path,
uri_path=self.uri_path,
repo_path=self.issuer_repo_path,
**kwargs)
```
#### File: rpkimancer/cert/extensions.py
```python
from __future__ import annotations
import logging
from cryptography import x509
from .oid import AS_RESOURCES_OID, IP_RESOURCES_OID
from ..resources import (ASIdentifiers, AsResourcesInfo,
IPAddrBlocks, IpResourcesInfo)
log = logging.getLogger(__name__)
class IpResources(x509.UnrecognizedExtension):
"""IP Address Resources X.509 certificate extension - RFC3779."""
# TODO: IPAddressRange support
def __init__(self, ip_resources: IpResourcesInfo) -> None:
"""Initialise the certificate extension."""
ip_address_blocks_data = IPAddrBlocks(ip_resources).to_der()
super().__init__(IP_RESOURCES_OID, ip_address_blocks_data)
class AsResources(x509.UnrecognizedExtension):
"""AS Number Resources X.509 certificate extension - RFC3779."""
def __init__(self, as_resources: AsResourcesInfo) -> None:
"""Initialise the certificate extension."""
as_identifiers_data = ASIdentifiers(as_resources).to_der()
super().__init__(AS_RESOURCES_OID, as_identifiers_data)
```
#### File: rpkimancer/cli/conjure.py
```python
from __future__ import annotations
import argparse
import importlib.metadata
import ipaddress
import logging
import os
import typing
from . import Args, BaseCommand, Return
if typing.TYPE_CHECKING:
from ..cert import CertificateAuthority
from ..sigobj.roa import RoaNetworkInfo
log = logging.getLogger(__name__)
DEFAULT_OUTPUT_DIR = os.path.join(os.curdir, "target", "demo")
PUB_SUB_DIR = "repo"
TAL_SUB_DIR = "tals"
DEFAULT_TA_AS_RESOURCES = [(0, 4294967295)]
DEFAULT_TA_IP_RESOURCES = [ipaddress.ip_network("0.0.0.0/0"),
ipaddress.ip_network("::0/0")]
DEFAULT_CA_AS_RESOURCES = [65000]
DEFAULT_CA_IP_RESOURCES = [ipaddress.ip_network("10.0.0.0/8"),
ipaddress.ip_network("2001:db8::/32")]
DEFAULT_GBR_FULLNAME = "<NAME>"
DEFAULT_GBR_ORG = "Example Org"
DEFAULT_GBR_EMAIL = "<EMAIL>"
META_PATH = "<path>"
META_AS = "<asn>"
META_IP = "<prefix>/<length>"
META_IP_MAXLEN = f"{META_IP}[-maxlen]"
META_NAME = "<name>"
META_ADDR = "<addr>"
class Conjure(BaseCommand):
"""Conjure a fully populated RPKI repository from thin air."""
subcommand = "conjure"
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
"""Initialise subcommand."""
super().__init__(*args, **kwargs)
log.info("trying to load plugins")
self._plugins = list()
entry_point_name = "rpkimancer.cli.conjure"
entry_points = importlib.metadata.entry_points()
for entry_point in entry_points.get(entry_point_name, []):
cls = entry_point.load()
if issubclass(cls, ConjurePlugin):
plugin = cls(self.parser)
self._plugins.append(plugin)
def init_parser(self) -> None:
"""Set up command line argument parser."""
self.parser.add_argument("--output-dir", "-o",
default=DEFAULT_OUTPUT_DIR,
metavar=META_PATH,
help="Directory to write generated artifacts to " # noqa: E501
"(default: %(default)s)")
self.parser.add_argument("--ta-as-resources",
nargs="+", type=int,
default=DEFAULT_TA_AS_RESOURCES,
metavar=META_AS,
help="ASN(s) to include in TA certificate "
"(default: %(default)s)")
self.parser.add_argument("--ta-ip-resources",
nargs="+", type=ipaddress.ip_network,
default=DEFAULT_TA_IP_RESOURCES,
metavar=META_IP,
help="IP addresses to include in TA certificate " # noqa: E501
"(default: %(default)s)")
self.parser.add_argument("--ca-as-resources",
nargs="+", type=int,
default=DEFAULT_CA_AS_RESOURCES,
metavar=META_AS,
help="ASN(s) to include in suboridinate CA certificate " # noqa: E501
"(default: %(default)s)")
self.parser.add_argument("--ca-ip-resources",
nargs="+", type=ipaddress.ip_network,
default=DEFAULT_CA_IP_RESOURCES,
metavar=META_IP,
help="IP addresses to include in suboridinate CA certificate " # noqa: E501
"(default: %(default)s)")
self.parser.add_argument("--roa-asid",
type=int,
default=DEFAULT_CA_AS_RESOURCES[0],
metavar=META_AS,
help="ASN to include in ROA asID "
"(default: %(default)s)")
self.parser.add_argument("--roa-networks",
nargs="+", type=self._roa_network,
default=[(ipaddress.ip_network(net), None)
for net in DEFAULT_CA_IP_RESOURCES],
metavar=META_IP_MAXLEN,
help="IP prefixes to include in ROA "
"(default: %(default)s)")
self.parser.add_argument("--gbr-full-name",
default=DEFAULT_GBR_FULLNAME,
metavar=META_NAME,
help="Full name to include in GBR "
"(default: %(default)s)")
self.parser.add_argument("--gbr-org",
default=DEFAULT_GBR_ORG,
metavar=META_NAME,
help="Organisation name to include in GBR "
"(default: %(default)s)")
self.parser.add_argument("--gbr-email",
default=DEFAULT_GBR_EMAIL,
metavar=META_ADDR,
help="Email address to include in GBR "
"(default: %(default)s)")
def run(self,
parsed_args: Args,
*args: typing.Any,
**kwargs: typing.Any) -> Return:
"""Run with the given arguments."""
log.info("setting up rpkimancer library objects")
from ..cert import CertificateAuthority, TACertificateAuthority
from ..sigobj import RouteOriginAttestation, RpkiGhostbusters
# create CAs
log.info("creating TA certificate authority")
ta = TACertificateAuthority(as_resources=parsed_args.ta_as_resources,
ip_resources=parsed_args.ta_ip_resources)
log.info("creating suboridinate certificate authority")
ca = CertificateAuthority(issuer=ta,
as_resources=parsed_args.ca_as_resources,
ip_resources=parsed_args.ca_ip_resources)
# create ROA
log.info("creating ROA object")
RouteOriginAttestation(issuer=ca,
as_id=parsed_args.roa_asid,
ip_address_blocks=parsed_args.roa_networks)
# create GBR
log.info("creating ghostbusters record object")
RpkiGhostbusters(issuer=ca,
full_name=parsed_args.gbr_full_name,
org=parsed_args.gbr_org,
email=parsed_args.gbr_email)
# run plugins
log.info("running plugins")
plugin_publish_kwargs = dict()
for plugin in self._plugins:
log.info(f"running plugin {plugin}")
if (kwargs := plugin(parsed_args, ca)) is not None:
log.debug(f"{plugin} returned kwargs for publish: {kwargs}")
plugin_publish_kwargs.update(kwargs)
# publish objects
log.info(f"publishing in-memory objects to {parsed_args.output_dir}")
ta.publish(pub_path=os.path.join(parsed_args.output_dir, PUB_SUB_DIR),
tal_path=os.path.join(parsed_args.output_dir, TAL_SUB_DIR),
**plugin_publish_kwargs)
return None
@staticmethod
def _roa_network(input_str: str) -> RoaNetworkInfo:
"""Convert input string to RoaNetworkInfo tuple."""
try:
network, maxlen = input_str.split("-", 1)
return (ipaddress.ip_network(network), int(maxlen))
except ValueError:
return (ipaddress.ip_network(input_str), None)
PluginReturn = typing.Optional[typing.Mapping[str, str]]
class ConjurePlugin:
"""Base class for conjure subcommand plugins."""
def __init__(self, parent: argparse.ArgumentParser) -> None:
"""Initialise the plugin."""
self.parser = parent
self.init_parser()
def __call__(self,
parsed_args: argparse.Namespace,
ca: CertificateAuthority,
*args: typing.Any,
**kwargs: typing.Any) -> PluginReturn:
"""Run the plugin."""
return self.run(parsed_args, ca, *args, **kwargs)
def init_parser(self) -> None:
"""Set up command line argument parser."""
raise NotImplementedError
def run(self,
parsed_args: Args,
ca: CertificateAuthority,
*args: typing.Any,
**kwargs: typing.Any) -> PluginReturn:
"""Run with the given arguments, returning extra publish kwargs."""
raise NotImplementedError
```
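A minimal sketch of what a third-party `ConjurePlugin` subclass could look like; the flag name is hypothetical, and the entry-point registration under `rpkimancer.cli.conjure` (used in `__init__` above for discovery) is not shown here:

```python
from rpkimancer.cli.conjure import ConjurePlugin, PluginReturn

class NoopPlugin(ConjurePlugin):
    """Hypothetical plugin that only adds a CLI flag."""

    def init_parser(self) -> None:
        self.parser.add_argument("--noop-flag", action="store_true",
                                 help="Hypothetical example option")

    def run(self, parsed_args, ca, *args, **kwargs) -> PluginReturn:
        if parsed_args.noop_flag:
            # A real plugin would create objects issued by `ca` here and could
            # return extra keyword arguments to be passed to ta.publish().
            pass
        return None
```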
#### File: rpkimancer/rpkimancer/cms.py
```python
from __future__ import annotations
import logging
from .asn1 import Content
from .asn1.mod import CryptographicMessageSyntax_2009
from .asn1.types import OID
log = logging.getLogger(__name__)
class ContentData(Content):
"""Generic base class for ASN.1 types idenitied by an OID."""
content_type: OID
class ContentInfo(Content):
"""CMS ASN.1 ContentInfo type - RFC5911."""
content_syntax = CryptographicMessageSyntax_2009.ContentInfo
def __init__(self, content: ContentData) -> None:
"""Initialise the instance from contained ContentData."""
log.info(f"preparing data for {self}")
content_type_oid = content.content_type.get_val()
content_type_name = content.content_syntax.fullname()
content_data = content.content_data
data = {"contentType": content_type_oid,
"content": (content_type_name, content_data)}
super().__init__(data)
class SignedData(ContentData):
"""CMS ASN.1 SignedData type - RFC5911."""
content_type = CryptographicMessageSyntax_2009.id_signedData
content_syntax = CryptographicMessageSyntax_2009.SignedData
class SignedAttributes(Content):
"""CMS ASN.1 SignedAttributes type - RFC5911."""
content_syntax = CryptographicMessageSyntax_2009.SignedAttributes
def __init__(self, content_type: OID, message_digest: bytes) -> None:
"""Initialise the instance from an eContentType and eContent digest."""
log.info(f"preparing data for {self}")
ct_attr_oid = CryptographicMessageSyntax_2009.id_contentType.get_val()
md_attr_oid = CryptographicMessageSyntax_2009.id_messageDigest.get_val() # noqa: E501
data = [
{
"attrType": ct_attr_oid,
"attrValues": [('ContentType', content_type.get_val())],
},
{
"attrType": md_attr_oid,
"attrValues": [('MessageDigest', message_digest)],
},
]
super().__init__(data)
class EncapsulatedContentInfo(Content):
"""CMS ASN.1 EncapsulatedContentInfo type - RFC5911."""
content_syntax = CryptographicMessageSyntax_2009.EncapsulatedContentInfo
@classmethod
def from_content_info(cls,
content_info: ContentInfo) -> EncapsulatedContentInfo: # noqa: E501
"""De-encapsulate from ContentInfo instance."""
val_path = ["content", "SignedData", "encapContentInfo"]
with content_info.constructed() as instance:
data = instance.get_val_at(val_path)
return cls(data)
@property
def econtent_bytes(self) -> bytes:
"""Recover eContent OCTET STRING."""
with self.constructed() as instance:
econtent = instance.get_internals()["cont"]["eContent"]
log.debug(f"{econtent.to_der()}")
econtent_bytes: bytes = econtent.get_const()["cont"].to_der()
return econtent_bytes
``` |
{
"source": "job/rpki-ov-checker",
"score": 2
} |
#### File: job/rpki-ov-checker/setup.py
```python
import rpki_ov_checker
version = rpki_ov_checker.__version__
import codecs
import os
import sys
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
here = abspath(dirname(__file__))
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
with codecs.open(join(here, 'README.md'), encoding='utf-8') as f:
README = f.read()
if sys.argv[-1] == 'publish':
os.system('python3 setup.py sdist upload')
print("You probably want to also tag the version now:")
print((" git tag -a %s -m 'version %s'" % (version, version)))
print(" git push --tags")
sys.exit()
install_reqs = parse_requirements('requirements.txt')
reqs = install_reqs
setup(
name='rpki-ov-checker',
version=version,
maintainer="<NAME>",
maintainer_email='<EMAIL>',
url='https://github.com/job/rpki-ov-checker',
description='RPKI Origin Validation checker',
long_description=README,
long_description_content_type="text/markdown",
license='ISCL',
keywords='rpki prefix routing networking',
setup_requires=reqs,
install_requires=reqs,
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Networking',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only'
],
packages=find_packages(exclude=['tests', 'tests.*']),
entry_points={'console_scripts':
['rpki-ov-checker = rpki_ov_checker.checker:main']},
)
``` |
{
"source": "job/rpki-ov-route-map",
"score": 2
} |
#### File: rpki-ov-route-map/rpki_ov_route_map/rpki_ov_route_map.py
```python
from aggregate6 import aggregate
from ipaddress import ip_network
import argparse
import collections
import json
import pprint
import requests
import rpki_ov_route_map
import sys
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', dest='cache',
default="https://rpki.gin.ntt.net/api/export.json",
type=str,
help="""Location of the RPKI Cache in JSON format
(default: https://rpki.gin.ntt.net/api/export.json)""")
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + rpki_ov_route_map.__version__)
args = parser.parse_args()
if 'http' in args.cache:
r = requests.get(args.cache, headers={'Accept': 'text/json'})
validator_export = r.json()
else:
validator_export = json.load(open(args.cache, "r"))
print("""!
ip bgp-community new-format
no ip community-list rpki-not-found
ip community-list standard rpki-not-found permit 65000:0
no ip community-list rpki-valid
ip community-list standard rpki-valid permit 65000:1
no ip community-list rpki-invalid
ip community-list standard rpki-invalid permit 65000:2
no ip community-list rpki
ip community-list expanded rpki permit 65000:[123]
!""")
data = dict()
data['vrps'] = load_vrp_list(validator_export)
data['origins'] = collections.defaultdict(set)
covered_space = set()
for vrp in data['vrps']:
covered_space.add(vrp['prefix'])
if vrp['prefixlen'] == vrp['maxlen']:
entry = vrp['prefix']
else:
entry = "{} le {}".format(vrp['prefix'], vrp['maxlen'])
data['origins'][vrp['origin']].add(entry)
print("no ip prefix-list rpki-covered-space-v4")
for i in aggregate(covered_space):
print("ip prefix-list rpki-covered-space-v4 permit {} le 32".format(i))
print("!")
for origin, prefixes in data['origins'].items():
if origin == 0:
continue
print("!")
print("no ip prefix-list rpki-origin-AS{}".format(origin))
for prefix in prefixes:
print("ip prefix-list rpki-origin-AS{} permit {}".format(origin,
prefix))
print("!")
print("no ip as-path access-list {}".format(origin))
print("ip as-path access-list {} permit _{}$".format(origin, origin))
print("""!
! test whether BGP NLRI is covered by RPKI ROA or not
route-map rpki-ov permit 1
match ip address prefix-list rpki-covered-space-v4
set comm-list rpki delete
continue 3
!
! BGP announcement is not covered by RPKI ROA, mark as not-found and exit
route-map rpki-ov permit 2
set comm-list rpki delete
set community 65000:0 additive
!
! find RPKI valids""")
n = 3
for origin in data['origins'].keys():
if origin == 0:
continue
print("!")
print("route-map rpki-ov permit {}".format(n))
print(" match ip prefix-list rpki-origin-AS{}".format(origin))
print(" match as-path {}".format(origin))
print(" set community 65000:1")
n += 1
print("!")
print("! Reject RPKI Invalid BGP announcements")
print("route-map rpki-ov deny {}".format(n))
# phase 3 reject invalids
def load_vrp_list(export):
"""
:param export: the JSON blob with all ROAs
"""
vrp_list = []
for vrp in export['roas']:
prefix_obj = ip_network(vrp['prefix'])
if prefix_obj.version == 6:
continue
try:
asn = int(vrp['asn'].replace("AS", ""))
if not 0 <= asn < 4294967296:
raise ValueError
except ValueError:
print("ERROR: ASN malformed", file=sys.stderr)
print(pprint.pformat(vrp, indent=4), file=sys.stderr)
continue
prefix = str(prefix_obj)
prefixlen = prefix_obj.prefixlen
maxlength = int(vrp['maxLength'])
vrp_list.append((prefix, prefixlen, maxlength, asn))
vrp_list_uniq = []
for vrp in set(vrp_list):
vrp_list_uniq.append({'prefix': vrp[0], 'prefixlen': vrp[1],
'maxlen': vrp[2], 'origin': vrp[3]})
return vrp_list_uniq
``` |
{
"source": "job/rpki-vrp-checker",
"score": 2
} |
#### File: rpki-vrp-checker/rpki_vrp_checker/main.py
```python
import json
import radix
import sys
import yaml
from rpki_vrp_checker import __version__
def parse_args(args):
import argparse
parser = argparse.ArgumentParser(
description="Perform various business logic test operations on a "
"given collection of RPKI based VRPs.")
parser.add_argument("-v", dest="verbose", action="store_true",
default=False,
help="Be verbose")
parser.add_argument("-i", dest="inputpath", required=True,
help="Input JSON file containing to-be-checked VRPs")
parser.add_argument("-b", dest="blessedpath", required=True,
help="Path to output the blessed JSON file checked VRPs")
# parser.add_argument("-s", dest="slurmpath",
# help="Path to SLURM JSON file")
parser.add_argument("-c", dest="canariespath", required=True,
help="Path to YAML file with expected ROA canaries")
parser.add_argument("-V", dest="version", action="store_true",
help="Display rpki-vrp-checker version")
return parser.parse_args(args)
def pretty_print(pack, num=False):
if num:
print("Entry #%s:" % num)
print(" Prefix : %s (MaxLength: %s)" % (pack['p'], pack['ml']))
print(" Origin AS : %s" % pack['asn'])
print(" Trust Anchor : %s" % pack['ta'].upper())
print()
def main():
args = parse_args(sys.argv[1:])
if args.version: # pragma: no cover
print("rpki-vrp-checker %s" % __version__)
sys.exit()
verbose = args.verbose
f = open(args.inputpath)
roas = json.load(f)
f.close()
f = open(args.canariespath)
canaries = yaml.safe_load(f)
f.close()
vrp_count = 0
tree = radix.Radix()
for r in roas['roas']:
asn = r['asn']
expires = r['expires']
prefix = r['prefix']
ml = r['maxLength']
ta = r['ta']
rnode = tree.search_exact(prefix)
pack = {'p': prefix, 'ta': ta, 'ml': ml, 'asn': asn}
if not rnode:
rnode = tree.add(prefix)
rnode.data['vrps'] = [pack]
else:
if pack not in rnode.data['vrps']:
rnode.data['vrps'].append(pack)
vrp_count += 1
if verbose:
print("loaded %s prefixes (%s vrps)" % (len(tree.prefixes()), vrp_count))
roas_of_interest = []
canaries_of_interest = []
for c in canaries:
prefix = c['prefix']
pack = {'p': prefix, 'ta': c['ta'], 'ml': c['maxlength'],
'asn': c['origin']}
canaries_of_interest.append(pack)
rnode = tree.search_worst(prefix)
if rnode:
for r in tree.search_covered(rnode.prefix):
for vrp in r.data['vrps']:
if vrp not in roas_of_interest:
roas_of_interest.append(vrp)
if verbose:
print("loaded %s canaries" % len(canaries))
print("ERROR: WRONG AT RIR LEVEL?")
print("--------------------------\n")
print("The following RPKI VRPs have been registered at the RIR level,\n"
"but were not added as canaries:\n")
c = 0
for vrp in roas_of_interest:
if vrp not in canaries_of_interest:
c += 1
pretty_print(vrp, c)
print("ERROR: MISSING VIPAR RPKI CANARY REGISTRATIONS?")
print("-----------------------------------------------\n")
print("The following canaries are not visible in "
"RPKI data at the RIR level:\n")
c = 0
for canarie in canaries_of_interest:
if canarie not in roas_of_interest:
c += 1
pretty_print(canarie, c)
# each canary must *exactly* be present in the VRP tree
# no less-specific VRPs must exist for any not-exactly registered canaries
# no more-specific VRPs must exist for any not-exactly registered canaries
if __name__ == '__main__':
sys.exit(main())
```
#### File: rpki-vrp-checker/tests/test_regression.py
```python
from vrp_checker import checker
from mock import patch
import io
import sys
import unittest
def stub_stdin(testcase_inst, inputs):
stdin = sys.stdin
def cleanup():
sys.stdin = stdin
testcase_inst.addCleanup(cleanup)
sys.stdin = StringIO(inputs)
def stub_stdouts(testcase_inst):
stderr = sys.stderr
stdout = sys.stdout
def cleanup():
sys.stderr = stderr
sys.stdout = stdout
testcase_inst.addCleanup(cleanup)
sys.stderr = StringIO()
sys.stdout = StringIO()
class TestAggregate(unittest.TestCase):
def test_00__roa_missing(self):
pass
def test_15_verbose(self):
stub_stdin(self, '10.0.0.0/24 10.0.1.0/24 172.16.0.0/24 10.0.0.0/32\n')
stub_stdouts(self)
with patch.object(sys, 'argv', ["prog.py", "-v"]):
agg_main()
self.assertEqual(sys.stdout.getvalue(), "+ 10.0.0.0/23\n- 10.0.0.0/24\n- 10.0.0.0/32\n- 10.0.1.0/24\n 172.16.0.0/24\n")
class StringIO(io.StringIO):
"""
A "safely" wrapped version
"""
def __init__(self, value=''):
value = value.encode('utf8', 'backslashreplace').decode('utf8')
io.StringIO.__init__(self, value)
def write(self, msg):
io.StringIO.write(self, msg.encode(
'utf8', 'backslashreplace').decode('utf8'))
def main():
unittest.main()
if __name__ == '__main__':
main()
``` |
{
"source": "jobryan/QuantPy",
"score": 2
} |
#### File: QuantPy/quantpy/event_profiler.py
```python
from pylab import errorbar, xlabel, ylabel, show, legend
from numpy import array, arange
def event_profiler(asset,truth,periods=5):
cut = []
for i in range(periods,len(asset)-periods):
if truth[i] == 1 and asset[i] > 0:
cut.append(asset[i-periods:i+periods]/asset[i])
return array(cut)
def plot_event_profile(events,name=''):
mn = events.mean(axis=0) - 1.0
st = events.std(axis=0)
errorbar(arange(len(mn)),mn,st,label=name)
xlabel('Periods', fontsize=16)
ylabel('Price Change %', fontsize=16)
if len(name) > 1:
legend(loc='best',shadow=True, fancybox=True)
show()
``` |
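A self-contained sketch of the intended call pattern, using synthetic data (purely illustrative):

```python
import numpy as np
from quantpy.event_profiler import event_profiler, plot_event_profile

# Synthetic price series and event indicator (1 marks an event period).
prices = np.cumprod(1 + 0.01 * np.random.randn(500)) * 100
events = (np.random.rand(500) > 0.95).astype(int)
events[250] = 1  # make sure at least one event falls inside the valid window

windows = event_profiler(prices, events, periods=5)  # shape: (n_events, 2*periods)
plot_event_profile(windows, name='synthetic')
```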
{
"source": "Jobs23/Reddit_Reader",
"score": 3
} |
#### File: Jobs23/Reddit_Reader/reddit_news_reader.py
```python
from flask import Flask
import json, requests, time, unidecode
app = Flask(__name__)
def get_headlines():
user_pass_dict = {'user':'your_username','passwd':'<PASSWORD>','api_type':'json'}
sess = requests.Session()
sess.headers.update({'User-Agent':'Trying some stuff out'})
sess.post('https://www.reddit.com/api/login', data = user_pass_dict)
time.sleep(1)
url = 'https://www.reddit.com/r/news/.json?limit=15'
html = sess.get(url)
data = json.loads(html.content.decode('utf-8'))
li = [each for each in data['data']['children']] #[u'modhash', u'whitelist_status', u'children', u'after', u'before']
titles = [unidecode.unidecode(listing['data']['title']) for listing in data['data']['children']]
titles = '\n\n'.join([i for i in titles])
return titles
titles = get_headlines()
print(titles)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "jobscore/ansible-role-resque-exporter",
"score": 2
} |
#### File: default/tests/test_default.py
```python
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_services_running_and_enabled(host):
assert host.service('prometheus-resque-exporter').is_enabled
assert host.service('prometheus-resque-exporter').is_running
def test_node_exporter_metrics(host):
out = host.check_output('curl http://localhost:9447/metrics')
assert 'resque_up' in out
``` |
{
"source": "jobscry/preader",
"score": 2
} |
#### File: preader/reader/admin.py
```python
from django.contrib import admin
from django.utils.timezone import now
from .models import Feed, Entry, FeedLog, UserEntry
class FeedLogAdmin(admin.ModelAdmin):
list_display = (
'feed',
'status_code',
'datetime',
'entries',
'duration',
)
date_hierarchy = 'datetime'
admin.site.register(FeedLog, FeedLogAdmin)
def force_update_next(modeladmin, request, queryset):
current_time = now()
queryset.update(
next_checked=current_time,
etag='',
last_modified=None
)
force_update_next.short_description = 'Force Feed Update on Next Scan'
def clear_errors(modeladmin, request, queryset):
queryset.update(error_count=0)
clear_errors.short_description = 'Reset Error Count'
def disable_feeds(modeladmin, request, queryset):
queryset.update(disabled=True)
disable_feeds.short_description = 'Disable Feeds'
def enable_feeds(modeladmin, request, queryset):
queryset.update(disabled=False)
enable_feeds.short_description = 'Enable Feeds'
class FeedAdmin(admin.ModelAdmin):
list_display = (
'title',
'disabled',
'has_subscribers',
'has_new_entries',
'last_checked',
'next_checked',
'check_frequency',
'error_count'
)
list_filter = ('disabled', 'has_new_entries', 'check_frequency')
actions = [
force_update_next,
clear_errors,
disable_feeds,
enable_feeds
]
admin.site.register(Feed, FeedAdmin)
class EntryAdmin(admin.ModelAdmin):
list_display = (
'feed',
'title',
'added_to_subscribers',
'updated',
'published'
)
list_filter = ('feed', 'added_to_subscribers')
admin.site.register(Entry, EntryAdmin)
class UserEntryAdmin(admin.ModelAdmin):
list_display = (
'user',
'feed',
'entry',
'status'
)
list_filter = ('status', )
admin.site.register(UserEntry, UserEntryAdmin)
```
#### File: preader/reader/forms.py
```python
from django import forms
from .models import Feed
class URLForm(forms.Form):
url = forms.URLField(label='URL', max_length=255)
class NewSubscriptionForm(forms.Form):
feeds = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, label='URLs')
def __init__(self, *args, **kwargs):
feed_id_list = kwargs.pop('feed_id_list')
super(NewSubscriptionForm, self).__init__(*args, **kwargs)
self.fields['feeds'] = forms.MultipleChoiceField(
choices=Feed.objects.filter(id__in=feed_id_list).values_list(
'id', 'feed_url'), widget=forms.CheckboxSelectMultiple, label='URLs'
)
class NewFeedForm(forms.ModelForm):
class Meta:
model = Feed
fields = ('feed_url', )
```
#### File: preader/reader/models.py
```python
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils.http import http_date
from django.utils.timezone import (
now,
make_aware,
make_naive,
get_current_timezone
)
from django_bleach.models import BleachField
from bs4 import BeautifulSoup
from model_utils import Choices
from model_utils.managers import QueryManager
from model_utils.models import TimeStampedModel
from datetime import datetime, timedelta
from time import mktime
from urllib.parse import urljoin
import hashlib
import bleach
import requests
from speedparser import speedparser
import email.utils as eut
import re
HEADERS = {
'User-Agent': getattr(settings, 'USER_AGENT', 'PReader 0.1')
}
FEED_TYPES = (
'application/atom+xml',
'application/rss+xml',
'text/xml'
)
BLEACH_TAGS = ['a', 'p', 'img', 'strong', 'em']
BLEACH_ATTRS = {
'*': ['class'],
'a': ['href', 'rel'],
'img': ['src', 'alt'],
}
CURRENT_TZ = get_current_timezone()
MAX_ERRORS = getattr(settings, 'MAX_ERRORS', 5)
REQ_MAX_REDIRECTS = getattr(settings, 'MAX_REDIRECTS', 3)
REQ_TIMEOUT = getattr(settings, 'TIMEOUT', 5.0)
MAX_FEEDS = getattr(settings, 'MAX_FEEDS', 5)
MAX_BULK_CREATE = getattr(settings, 'MAX_BULK_CREATE', 100)
# stolen from http://code.activestate.com/recipes/363841-detect-character-encoding-in-an-xml-file/
xmlDec = r"""
^<\?xml # w/o BOM, xmldecl starts with <?xml at the first byte
.+? # some chars (version info), matched minimal
encoding= # encoding attribute begins
["'] # attribute start delimiter
[^"']+ # every character not delimiter (not overly exact!)
["'] # attribute end delimiter
.*? # some chars optionally (standalone decl or whitespace)
\?> # xmldecl end
"""
XML_DECLARATION = re.compile(xmlDec, re.I | re.X)
alphanum = re.compile(r'[\W_]+')
class SimpleBufferObject(object):
def __init__(self, model, max_items=None):
if max_items is None:
self.max = MAX_BULK_CREATE
else:
self.max = max_items
self.buffer = list()
self.count = 0
self.model = model
def __enter__(self):
return self
def add(self, item):
self.buffer.append(item)
self.count += 1
if self.count >= self.max:
self.purge()
def purge(self):
self.model.objects.bulk_create(self.buffer)
del self.buffer
self.buffer = list()
self.count = 0
def __exit__(self, *args, **kwargs):
self.purge()
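# Usage sketch (illustrative only, not in the original module): SimpleBufferObject
# batches ORM instances and flushes them with bulk_create once MAX_BULK_CREATE items
# have accumulated; the context manager flushes any remaining partial batch on exit.
#
#     with SimpleBufferObject(Entry) as buf:
#         for parsed_entry in parsed_entries:   # `parsed_entries` is assumed here
#             buf.add(Entry(feed=feed, title=parsed_entry['title']))
#     # leaving the block calls purge(), writing the final partial batch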
class Feed(TimeStampedModel):
CHECK_FREQUENCY_CHOICES = Choices(
(1, 'h', 'Every Hour'),
(12, 'th', 'Every 12 Hours'),
(24, 'd', 'Every Day')
)
title = models.CharField(max_length=255) # required
description = BleachField(blank=True)
icon = models.URLField(blank=True)
site_url = models.URLField(blank=True)
feed_url = models.URLField(unique=True) # required
disabled = models.BooleanField(db_index=True, default=False)
last_checked = models.DateTimeField(null=True, blank=True)
next_checked = models.DateTimeField(null=True, blank=True)
check_frequency = models.PositiveSmallIntegerField(
choices=CHECK_FREQUENCY_CHOICES, default=CHECK_FREQUENCY_CHOICES.h)
error_count = models.PositiveSmallIntegerField(default=0)
etag = models.CharField(max_length=255, blank=True)
last_modified = models.DateTimeField(null=True, blank=True)
subscriptions = models.ManyToManyField(User)
objects = models.Manager()
active = QueryManager(disabled=False, has_subscribers=True)
has_subscribers = models.BooleanField(default=False)
has_new_entries = models.BooleanField(default=False, db_index=True)
def __str__(self): # pragma: no cover
return self.title
class Meta:
ordering = ('-modified', '-created')
def subscribe(self, user):
self.subscriptions.add(user)
UserEntry.subscribe_users(user, self)
if not self.has_subscribers:
self.has_subscribers = True
self.save()
def unsubscribe(self, user):
self.subscriptions.remove(user)
UserEntry.unsubscribe_users(user, self)
if self.subscriptions.count() < 1:
self.has_subscribers = False
self.save()
def is_subscribed(self, user):
return self.subscriptions.filter(pk=user.pk).exists()
def increment_error_count(self):
self.error_count += 1
if self.error_count >= MAX_ERRORS:
self.disabled = True
def reset_error_count(self):
self.error_count = 0
self.disabled = False
@staticmethod
def get_feeds_from_url(url):
"""
From URL, check if URL is a feed or if URL has feeds.
Returns a list of Feed objects with feeds from URL
"""
feeds = []
# check if URL is already in database
existing = Feed.objects.filter(feed_url=url)
if existing:
return [existing.first(), ]
# not in database, check the URL via GET request
req = requests.get(
url,
headers=HEADERS,
allow_redirects=True
)
if req.status_code == requests.codes.ok:
req.encoding = 'utf-8'
# sometimes content types have extra text, get rid of ';'
            content_type = req.headers.get('content-type', '')
if ';' in content_type:
content_type = content_type.split(';')[0]
# is this URL a feed?
if content_type in FEED_TYPES:
feed, created = Feed.objects.get_or_create(feed_url=req.url, defaults={'title': 'no title yet'})
return [feed, ]
# no feed, check for feeds in head
html = BeautifulSoup(req.text, 'lxml')
if html.head is not None:
for feed_type in FEED_TYPES:
feed_count = 0
for link in html.head.find_all(type=feed_type):
feed_url = urljoin(req.url, link.get('href'))
feed, created = Feed.objects.get_or_create(
feed_url=feed_url,
defaults={'title': 'no title yet'}
)
feeds.append(feed)
feed_count += 1
if feed_count >= MAX_FEEDS:
break
return feeds
return feeds
@staticmethod
def update_feeds(num=10):
with SimpleBufferObject(Entry) as new_entry_buffer:
current_time = now()
# get all active feeds with subscribers that have not been checked or need to be checked based
# on "next_checked"
feeds = Feed.active.filter(Q(next_checked=None) | Q(next_checked__lte=current_time))[:num]
for feed in feeds:
# update last checked to current time
feed.last_checked = now()
# set "next_checked" based on "check_frequency"
feed.next_checked = feed.last_checked + timedelta(hours=feed.check_frequency)
# create new FeedLog object
log = FeedLog(feed=feed)
notes = []
# load conditional GET headers from feed object
                headers = dict(HEADERS)
if feed.etag and feed.etag != '':
headers['If-None-Match'] = feed.etag
if feed.last_modified:
last_modified = make_naive(feed.last_modified)
headers['If-Modified-Since'] = http_date(last_modified.timestamp())
try:
req = requests.get(feed.feed_url, headers=headers, allow_redirects=True)
log.status_code = req.status_code
log.headers = ', '.join("{!s}={!r}".format(key, val) for (key, val) in headers.items())
log.headers += "--\n"
log.headers += ', '.join("{!s}={!r}".format(key, val) for (key, val) in req.headers.items())
notes.append('updating {0}'.format(feed))
# update feed URL if redirected or altered
if (req.url != feed.feed_url) and (req.history[-1].status_code == 301):
# if updated feed URL already exists, something is wrong
if Feed.objects.filter(feed_url=req.url).exists():
feed.disabled = True
notes.append(
'Feed URL does not match response, \
but new feed already exists with {0}.'.format(req.url)
)
else:
notes.append('Updating feed url from {0} to {1}.'.format(feed.feed_url, req.url))
feed.feed_url = req.url
if req.status_code == requests.codes.not_modified:
notes.append('not modified')
elif req.status_code == requests.codes.ok:
notes.append('status OK, parsing')
# update conditional GET data
feed.etag = alphanum.sub('', req.headers.get('etag', ''))
feed.last_modified = parse_http_date(
req.headers.get('last-modified', None), default=feed.last_checked)
# must remove encoding declaration from feed or lxml will pitch a fit
text = XML_DECLARATION.sub('', req.text, 1)
parsed = speedparser.parse(text, encoding=req.encoding)
# bozo feed
if parsed.bozo == 1:
notes.append('bozo feed')
notes.append(parsed.bozo_tb)
feed.increment_error_count()
else:
# update feed meta data, reset error count
feed.reset_error_count()
feed.title = parsed.feed.get('title', feed.title)
feed.title = shorten_string(feed.title)
feed.description = parsed.feed.get('description', parsed.feed.get('subtitle', None))
# icon/logo are not working in speedparser
# feed.icon = parsed.feed.get('logo', feed.icon)
# get latest existing entry for feed
try:
latest_entry = feed.entry_set.latest()
except Entry.DoesNotExist:
latest_entry = None
for count, entry in enumerate(parsed.entries):
published = feed_datetime(
entry.get('published_parsed', entry.get('updated_parsed', None)),
default=feed.last_checked
)
# only proceed if entry is newer than last
# entry for feed
if latest_entry is None or published > latest_entry.published:
# entry ID is a hash of the link or entry id
entry_id = hashlib.sha1(entry.get('id', entry.link).encode('utf-8')).hexdigest()
author = bleach.clean(
entry.get('author', 'no author'), strip=True, strip_comments=True)
author = shorten_string(author)
content = None
content_items = entry.get('content', None)
if content_items is None:
content = entry.get('summary', 'No summary.')
else:
for c in content_items:
if c.get('type', None) in ('text', 'html', 'xhtml', None):
if content is None:
content = c.get('value', '')
else:
content += c.get('value', '')
content = bleach.clean(
content, tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True,
strip_comments=True)
title = bleach.clean(
entry.get('title', 'no title'), strip=True, strip_comments=True)
title = shorten_string(title)
new_entry_buffer.add(
Entry(
feed=feed,
entry_id=entry_id,
link=entry.get('link', ''),
title=title,
author=author,
content=content,
published=published,
updated=feed_datetime(entry.get('updated_parsed', None),
default=feed.last_checked)
)
)
log.entries += 1
else:
break
if log.entries > 0:
                                feed.has_new_entries = True
else:
notes.append('error: {0}'.format(req.status_code))
feed.increment_error_count()
except requests.exceptions.Timeout: # pragma: no cover
log.notes = 'timeout error'
feed.increment_error_count()
except requests.exceptions.ConnectionError: # pragma: no cover
log.notes = 'connection error'
feed.increment_error_count()
except requests.exceptions.HTTPError: # pragma: no cover
log.notes = 'HTTP error'
feed.increment_error_count()
except requests.exceptions.TooManyRedirects: # pragma: no cover
log.notes = 'too many redirects'
feed.increment_error_count()
log.notes = '\n'.join(notes)
duration = now() - feed.last_checked
log.duration = duration.microseconds
feed.save()
log.save()
def shorten_string(string, max_len=255, end='...'):
if len(string) >= max_len:
reduce = max_len - len(end)
return string[:reduce] + end
return string
def feed_datetime(timetuple, allow_none=False, default=None):
"""
Feed Datetime
Utility for getting python datetime from entries. Converts a timetuple (if not None) to a timezone
aware python datetime object.
:param timetuple: timetuple if timetuple exists in entry element
    :param allow_none: should None be returned if no datetime object exists? If allow_none is true and no timetuple
or default exists, return the current datetime
:param default: if timetuple is none, use this value
:return: a timezone aware python datetime object
"""
if timetuple is None:
if default is None:
if allow_none:
return None
return now()
return default
r = datetime.fromtimestamp(mktime(timetuple))
return make_aware(r, CURRENT_TZ)
def parse_http_date(http_date_str, default=None):
"""
Parse HTTP Date
Parses an RFC1123 date string and returns a datetime object
Example: Sun, 06 Nov 1994 08:49:37 GMT
:param http_date_str:
:param default: Python Datetime to return if http_date_str is None
:return:
"""
if not http_date_str:
return default
return datetime.fromtimestamp(mktime(eut.parsedate(http_date_str)))
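# Example (comment only): parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') returns
# the naive datetime(1994, 11, 6, 8, 49, 37); the zone designator is dropped, so
# callers comparing it with timezone-aware values should wrap it with make_aware().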
class Entry(models.Model):
feed = models.ForeignKey(Feed)
entry_id = models.CharField(max_length=40)
link = models.URLField(max_length=2083)
title = models.CharField(max_length=255)
author = models.CharField(max_length=255, blank=True)
content = BleachField()
updated = models.DateTimeField(blank=True)
published = models.DateTimeField(db_index=True)
added_to_subscribers = models.BooleanField(default=False, db_index=True)
class Meta:
ordering = ('-published', '-updated')
get_latest_by = 'published'
verbose_name_plural = 'entries'
def __str__(self): # pragma: no cover
return '{0}: {1}'.format(self.feed, self.entry_id)
class UserEntry(models.Model):
UNREAD = 'u'
READ = 'r'
SAVED = 's'
STATUS = (
(UNREAD, 'Unread'),
(READ, 'Read'),
(SAVED, 'Saved'),
)
user = models.ForeignKey(User)
feed = models.ForeignKey(Feed)
entry = models.ForeignKey(Entry)
status = models.CharField(max_length=1, choices=STATUS, default=UNREAD)
objects = models.Manager()
read = QueryManager(status=READ)
saved = QueryManager(status=SAVED)
unread = QueryManager(status=UNREAD)
class Meta:
verbose_name = 'User Entry'
verbose_name_plural = 'User Entries'
@staticmethod
def update_subscriptions():
"""
Add New Entries
        For all feeds flagged with has_new_entries, get all subscribers and the entries with added_to_subscribers flag False.
For each subscriber, add to bulk create buffer a new UserEntry object.
Update flags on entries and feeds.
"""
with SimpleBufferObject(UserEntry) as user_entry_object_buffer:
feeds = Feed.active.filter(has_new_entries=True)
for feed in feeds:
entries = feed.entry_set.filter(added_to_subscribers=False).only('id')
if entries:
subscribers = feed.subscriptions.all()
for subscriber in subscribers:
for entry in entries:
user_entry_object_buffer.add(
UserEntry(user=subscriber, feed=feed, entry=entry)
)
entries.update(added_to_subscribers=True)
feed.has_new_entries = False
feed.save()
@staticmethod
def subscribe_users(users, feed):
if not hasattr(users, '__iter__'):
users = (users, )
user_entry_object_buffer = SimpleBufferObject(UserEntry)
for user in users:
entries = feed.entry_set.filter(added_to_subscribers=True).only('id')
if entries:
for entry in entries:
user_entry_object_buffer.add(
UserEntry(user=user, feed=feed, entry=entry)
)
user_entry_object_buffer.purge()
@staticmethod
def unsubscribe_users(users, feed):
if not hasattr(users, '__iter__'):
users = (users, )
UserEntry.objects.filter(user__in=users, feed=feed).delete()
class FeedLog(models.Model):
feed = models.ForeignKey(Feed, editable=False)
status_code = models.PositiveSmallIntegerField(null=True, blank=True)
headers = models.TextField(blank=True)
notes = models.TextField(blank=True)
duration = models.PositiveIntegerField()
datetime = models.DateTimeField(auto_now_add=True)
entries = models.PositiveIntegerField(default=0)
class Meta:
verbose_name = 'Feed Log'
verbose_name_plural = 'Feed Logs'
def __str__(self): # pragma: no cover
return '{0} ({1}) on {2}'.format(self.feed, self.status_code, self.datetime)
``` |
{
"source": "jobscry/soc-analyst",
"score": 3
} |
#### File: soc-analyst/analyst/config.py
```python
from aumbry import Attr, YamlConfig
class DatabaseConfig(YamlConfig):
__mapping__ = {"file_path": Attr("file_path", str)}
connection = ""
class AppConfig(YamlConfig):
__mapping__ = {
"db": Attr("db", DatabaseConfig),
"gunicorn": Attr("gunicorn", dict),
"version": Attr("version", str),
"asn_path": Attr("asn_path", str),
"geo_path": Attr("geo_path", str)
}
def __init__(self):
self.db = DatabaseConfig()
self.gunicorn = {}
self.version = None
self.asn_path = None
self.geo_path = None
```
#### File: soc-analyst/analyst/__main__.py
```python
import sys
import aumbry
from docopt import docopt
from gunicorn.app.base import BaseApplication
from gunicorn.workers.sync import SyncWorker
from analyst.app import AnalystService
from analyst.config import AppConfig
class CustomWorker(SyncWorker):
def handle_quit(self, sig, frame):
self.app.application.stop(sig)
super(CustomWorker, self).handle_quit(sig, frame)
def run(self):
self.app.application.start()
super(CustomWorker, self).run()
class GunicornApp(BaseApplication):
""" Custom Gunicorn application
This allows for us to load gunicorn settings from an external source
"""
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(GunicornApp, self).__init__()
def load_config(self):
for key, value in self.options.items():
self.cfg.set(key.lower(), value)
self.cfg.set("worker_class", "analyst.__main__.CustomWorker")
def load(self):
return self.application
def main():
# docopt(__doc__)
cfg = aumbry.load(
aumbry.FILE, AppConfig, {"CONFIG_FILE_PATH": "./etc/analyst/config.yml"}
)
api_app = AnalystService(cfg)
gunicorn_app = GunicornApp(api_app, cfg.gunicorn)
gunicorn_app.run()
if __name__ == "__main__":
sys.exit(main())
```
#### File: analyst/middleware/cors.py
```python
class CORSComponentMiddleware:
# https://falcon.readthedocs.io/en/stable/user/faq.html#how-do-i-implement-cors-with-falcon
def process_response(self, req, resp, resource, req_succeeded):
resp.set_header("Access-Control-Allow-Origin", "*")
if (
req_succeeded
and req.method == "OPTIONS"
and req.get_header("Access-Control-Request-Method")
):
# NOTE(kgriffs): This is a CORS preflight request. Patch the
# response accordingly.
allow = resp.get_header("Allow")
resp.delete_header("Allow")
allow_headers = req.get_header(
"Access-Control-Request-Headers", default="*"
)
resp.set_headers(
(
("Access-Control-Allow-Methods", allow),
("Access-Control-Allow-Headers", allow_headers),
("Access-Control-Max-Age", "86400"), # 24 hours
)
)
```
#### File: analyst/middleware/json.py
```python
import falcon
class RequireJSONMiddleware:
"""
Require JSON Middleware
Ensure each requests is expecting a JSON response. Check HTTP Header `Accept`
"""
def process_request(self, req: falcon.Request, resp: falcon.Response):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable(
"This API only accepts responses encoded as JSON."
)
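# Wiring sketch (an assumption, not shown in this file): falcon middleware objects
# are passed to the application constructor, e.g.
#
#     app = falcon.App(middleware=[RequireJSONMiddleware(), CORSComponentMiddleware()])
#
# (older falcon releases use falcon.API instead of falcon.App; AnalystService in
# analyst/app.py presumably does this wiring.)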
```
#### File: analyst/models/manager.py
```python
from peewee import *
from analyst.models.user import User
from analyst.models.iplist import ListItem, IPList, IPListItem
class DBManager:
def __init__(self, db_path: str, db_classes: list = [User, ListItem, IPList, IPListItem]):
self.db_path = db_path
self.db_classes = db_classes
self.db = None
def setup(self):
self.db = SqliteDatabase(self.db_path)
self.db.bind(self.db_classes)
self.db.create_tables(self.db_classes)
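# Usage sketch (the path below is an assumption): bind the models to a SQLite file
# and create any missing tables once at startup, e.g.
#
#     DBManager("analyst.db").setup()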
```
#### File: analyst/resources/users.py
```python
import falcon
from falcon.media.validators.jsonschema import validate
from peewee import DoesNotExist, IntegrityError
from analyst.models.user import User, create_user
from analyst.resources import BaseResource, check_permission
from analyst.schemas import load_schema
class UsersResource(BaseResource):
def on_get(self, req: falcon.Request, resp: falcon.Response, username: str = None):
if username is None:
if not req.context["user"].is_admin:
raise falcon.HTTPForbidden(
"Forbidden", "Insufficient privileges for operation."
)
user = User.select(
User.username,
User.is_active,
User.is_admin,
User.is_manager,
User.created_on,
)
resp.media = {"users": list(user.dicts())}
else:
user = User.get_or_404(User.username == username)
if not req.context["user"].is_admin and req.context["user"].id != user.id:
raise falcon.HTTPForbidden(
"Forbidden", "Insufficient privileges for operation."
)
resp.media = {
"user": user.to_dict(
["username", "is_active", "is_admin", "is_manager", "created_on"]
)
}
@check_permission(lambda user: user.is_admin)
@validate(load_schema("create_user"))
def on_post(self, req: falcon.Request, resp: falcon.Response, username: str = None):
username = req.media.get("username", None)
password = req.media.get("password", None)
is_admin = req.media.get("is_admin", False)
is_manager = req.media.get("is_manager", False)
is_active = req.media.get("is_active", True)
try:
create_user(username, password, is_admin, is_manager, is_active)
resp.status = falcon.HTTP_201
resp.media = {"status": "Success", "message": "New user created."}
except IntegrityError:
raise falcon.HTTPBadRequest("Bad Request", "Username already exists.")
@validate(load_schema("update_user"))
def on_put(self, req: falcon.Request, resp: falcon.Response, username: str = None):
user = User.get_or_404(User.username == username)
if not req.context["user"].is_admin and req.context["user"].id != user.id:
raise falcon.HTTPForbidden(
"Forbidden", "Insufficient privileges for operation."
)
is_admin = req.media.get("is_admin", None)
is_manager = req.media.get("is_manager", None)
is_active = req.media.get("is_active", None)
if req.context["user"].id == user.id and (
is_admin is not None or is_manager is not None or is_active is not None
):
raise falcon.HTTPForbidden("Forbidden", "Can not modifiy own attributes.")
password = req.media.get("password", None)
if password is not None:
user.set_password(password)
if is_admin is not None:
user.is_admin = is_admin
if is_manager is not None:
user.is_manager = is_manager
if is_active is not None:
user.is_active = is_active
user.save()
resp.media = {"status": "Success", "message": "User updated."}
@check_permission(lambda user: user.is_admin)
def on_delete(
self, req: falcon.Request, resp: falcon.Response, username: str = None
):
user = User.get_or_404(User.username == username)
if req.context["user"].id == user.id:
raise falcon.HTTPBadRequest("Bad Request", "Can not delete self.")
user.delete_instance()
resp.media = {"status": "Success", "message": "User deleted."}
class InitResource(BaseResource):
"""
Initialize Resource
    Creates a single admin user. Only callable if no admin user exists.
"""
auth = {"auth_disabled": True}
@validate(load_schema("create_user_init"))
def on_post(self, req: falcon.Request, resp: falcon.Response):
if User.select().where(User.is_admin).count() > 0:
raise falcon.HTTPBadRequest("Bad Request", "App already initialized.")
token = create_user(
username=req.media.get("username"),
password=req.media.get("password"),
is_admin=True,
)
resp.status = falcon.HTTP_201
resp.media = {
"status": "Success",
"token": token,
"message": "First admin user created.",
}
```
#### File: soc-analyst/tests/test_resources_asn.py
```python
import pytest
from tests import client, superuser
import json
def test_asn_post_ok(client, superuser):
resp = client.simulate_post(
"/api/test/asn",
headers={"Authorization": f"Token {superuser}"},
json={"ips": ["1.1.1.1"]},
)
assert resp.status_code == 200
assert len(resp.json) == 1
def test_asn_get_notfound(client, superuser):
resp = client.simulate_get(
"/api/test/asn", headers={"Authorization": f"Token {superuser}"}
)
assert resp.status_code == 404
def test_asn_get_found(client, superuser):
resp = client.simulate_get(
"/api/test/asn/1.1.1.1", headers={"Authorization": f"Token {superuser}"}
)
assert resp.status_code == 200
assert "ip" in resp.json
``` |
{
"source": "jobscry/vuln_manager",
"score": 2
} |
#### File: vuln_manager/cpes/views.py
```python
from django.db.models import Count
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import (
redirect,
render_to_response,
get_list_or_404,
get_object_or_404
)
from django.template import RequestContext
from django.utils.http import urlquote, urlunquote
from core.pagination import Pages
from cves.models import Alert
from .models import Item, Watch
import re
PER_PAGE = getattr(settings, 'MAX_PER_PAGE', 100)
# characters outside this whitelist are stripped from user-supplied query values
ALLOWED_CHARS = re.compile(
    r'[^a-zA-Z0-9\-"\'`\|~!@#\$%\^&\'\*\(\)_\[\]{};:,\.<>=\+]+'
)
def get_part(request):
part = request.GET.get('part', None)
    if part not in ('a', 'o', 'h'):
part = 'a'
return part
def get_val(request, name):
    val = request.GET.get(name, None)
    if val is not None:
        val = re.sub(ALLOWED_CHARS, '', urlunquote(val))
    return val
def index(request, level='part'):
part = None
if level == 'part':
item_list = Item.objects.values('part').order_by('part').annotate(
count=Count('vendor'))
q_dict = {}
next_level = 'vendor'
part = None
vendor = None
elif level == 'vendor':
part = get_part(request)
item_list = Item.objects.filter(part=part).values(
'vendor').order_by('vendor').annotate(count=Count('product'))
q_dict = {'part': part}
next_level = 'product'
vendor = None
elif level == 'product':
part = get_part(request)
vendor = get_val(request, 'vendor')
q_dict = {
'part': part,
'vendor': vendor
}
if not Item.objects.filter(part=part, vendor=vendor).exists():
raise Http404("No products exist.")
item_list = Item.objects.filter(
part=part, vendor=vendor).values(
'product').order_by('product').annotate(count=Count('id'))
next_level = None
paginator = Pages(item_list.all(), PER_PAGE)
page = int(request.GET.get('page', 1))
try:
objects = paginator.pages.page(page)
except PageNotAnInteger:
objects = paginator.pages.page(1)
except EmptyPage:
objects = paginator.pages.page(paginator.pages.num_pages)
new_objects = []
for obj in objects:
new_objects.append({
'obj': obj.get(level),
'count': obj.get('count'),
            'url': urlquote(obj.get(level), safe='')
})
objects.object_list = new_objects
return render_to_response(
'cpes/index.html',
RequestContext(
request,
{
'part': part,
'vendor': vendor,
'objects': objects,
'level': level,
'q_dict': q_dict,
'next_level': next_level,
'pages': paginator.pages_to_show(page)
}
)
)
def version_index(request):
part = get_part(request)
vendor = get_val(request, 'vendor')
product = get_val(request, 'product')
if part is None or vendor is None or product is None:
raise Http404('No product found.')
objects = get_list_or_404(Item.objects.only(
'part', 'vendor', 'product', 'pk', 'cpe23_wfn'
), part=part, vendor=vendor, product=product)
q_dict = {
'part': part, 'vendor': vendor, 'product': product
}
can_watch = request.user.is_authenticated()
has_watch = None
if can_watch:
has_watch = Watch.objects.filter(**q_dict).filter(
users=request.user).exists()
return render_to_response(
'cpes/version_index.html',
RequestContext(
request,
{
'part': part,
'vendor': vendor,
'product': product,
'objects': objects,
'q_dict': q_dict,
'can_watch': can_watch,
'has_watch': has_watch
}
)
)
@login_required
def watch_toggle(request):
part = get_part(request)
vendor = get_val(request, 'vendor')
product = get_val(request, 'product')
if part is None or vendor is None or product is None:
raise Http404('No product found.')
w, created = Watch.objects.get_or_create(
part=part,
vendor=vendor,
product=product
)
if created:
w.users.add(request.user)
messages.success(request, 'Watch created')
else:
if w.users.filter(pk=request.user.pk).exists():
w.users.remove(request.user)
messages.warning(request, 'Watch removed')
else:
w.users.add(request.user)
messages.success(request, 'Watch created')
return redirect(
'{0}?part={1}&vendor={2}&product={3}'.format(
reverse('cpes:version_index'),
urlquote(part),
urlquote(vendor),
urlquote(product)
)
)
```
#### File: management/commands/load_CVE_feed.py
```python
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils.timezone import now
from cpes.models import Item
from cpes.management.commands.utils import get_remote_dict, fast_iter
from cves.models import (
VulnerabilityDictionary as Dictionary,
Vulnerability,
Alert
)
from .utils import (
Updater,
get_xpath,
get_xpath_date,
get_refrences,
get_vuln_item,
FEED_SCHEMA
)
from os.path import join
from lxml import etree
import time
FULL_CVSS_URL = getattr(
settings,
'FULL_CVSS_URL',
'https://nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-2015.xml.gz'
)
MODIFIED_CVSS_URL = getattr(
settings,
'MODIFIED_CVSS_URL',
'https://nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-Modified.xml.gz'
)
def parse_cves_full(element, updater):
updater.add_item(
Item.objects.filter(
cpe22_wfn__in=get_xpath(
element,
'a:vulnerable-software-list/a:product/text()'
)
).values_list('pk', flat=True),
Vulnerability(
cve_id=element.get('id'),
published=get_xpath_date(
element, 'a:published-datetime/text()'),
modified=get_xpath_date(
element, 'a:last-modified-datetime/text()'),
cwe=get_xpath(element, 'a:cwe/@id', False),
summary=get_xpath(element, 'a:summary/text()', False),
references=[x for x in get_refrences(
get_xpath(element, 'a:references/a:reference'))],
cvss_base_score=get_xpath(
element, 'a:cvss/b:base_metrics/b:score/text()', False),
cvss_access_vector=get_xpath(
element,
'a:cvss/b:base_metrics/b:access-vector/text()',
False
),
cvss_access_complexity=get_xpath(
element,
'a:cvss/b:base_metrics/b:access-complexity/text()',
False
),
cvss_authentication=get_xpath(
element,
'a:cvss/b:base_metrics/b:authentication/text()',
False
),
cvss_confidentiality_impact=get_xpath(
element,
'a:cvss/b:base_metrics/b:confidentiality-impact/text()',
False
),
cvss_integrity_impact=get_xpath(
element,
'a:cvss/b:base_metrics/b:integrity-impact/text()',
False
),
cvss_availability_impact=get_xpath(
element,
'a:cvss/b:base_metrics/b:availability-impact/text()',
False
),
cvss_generated=get_xpath_date(
element,
'a:cvss/b:base_metrics/b:generated-on-datetime/text()'
),
dictionary=updater.update_obj
)
)
def parse_cves_update(element, updater):
published = get_xpath_date(element, 'a:published-datetime/text()')
modified = get_xpath_date(element, 'a:last-modified-datetime/text()')
if published > updater.latest or \
(modified is not None and modified > updater.latest):
cve_id = element.get('id')
try:
cve = Vulnerability.objects.get(cve_id=cve_id)
if modified > cve.modified:
cve.__dict__.update(
get_vuln_item(
element, cve_id, published, modified, updater
)
)
cve.save()
cve.cpes.clear()
items = Item.objects.filter(
cpe22_wfn=get_xpath(
element, 'a:vulnerable-software-list/a:product/text()'
)
).values_list('pk', flat=True)
cve.cpes.add(*list(items))
updater.increment('num_updated')
else:
updater.increment('num_not_updated')
except Vulnerability.DoesNotExist:
updater.add_item(
Item.objects.filter(
cpe22_wfn__in=get_xpath(
element,
'a:vulnerable-software-list/a:product/text()'
)
).values_list('pk', flat=True),
Vulnerability(
**get_vuln_item(
element, cve_id, published, modified, updater
)
)
)
else:
updater.increment('num_not_updated')
class Command(BaseCommand):
args = '<cve_file_name>'
help = 'Populates/Updates the CVE Database'
def add_arguments(self, parser):
parser.add_argument(
'--full',
action='store_true',
dest='full',
default=False,
help='Full database update?')
def handle(self, *args, **options):
self.verbosity = options.get('verbosity')
current_date = now()
file_path = join(
getattr(settings, 'MEDIA_ROOT', ''),
'data',
'cvss-dict-%s.xml.gz' % (current_date.strftime('%Y%m%d'))
)
if options['full']:
url = FULL_CVSS_URL
else:
url = MODIFIED_CVSS_URL
try:
d = Dictionary.objects.latest()
self.stdout.write(
'Previous dictionary found with date: %s' % d.last_modified)
is_created, new_created, new_etag = get_remote_dict(
url,
file_path,
d.last_modified,
d.etag or None,
self.verbosity,
self.stdout
)
except Dictionary.DoesNotExist:
if self.verbosity >= 2:
self.stdout.write('No previous dictionary found.')
is_created, new_created, new_etag = get_remote_dict(
url,
file_path,
None,
None,
self.verbosity,
self.stdout
)
if is_created:
file_path = file_path[:-3]
update = Dictionary.objects.create(
dictionary_file=file_path,
start=float(time.time()),
last_modified=new_created,
etag=new_etag
)
updater = Updater(update, Vulnerability)
if self.verbosity >= 2:
self.stdout.write('Count fields are %s' % ', '.join(
updater.count_fields.keys()
))
self.stdout.write('Parsing ' + file_path)
context = etree.iterparse(
file_path, events=('end', ), tag=FEED_SCHEMA + 'entry')
if options['full']:
if self.verbosity >= 2:
self.stdout.write('Full database populate.')
fast_iter(context, parse_cves_full, updater)
else:
if self.verbosity >= 2:
self.stdout.write('Database update only.')
fast_iter(context, parse_cves_update, updater)
updater.save()
if self.verbosity >= 2:
self.stdout.write('Done parsing.')
update.num_updated = updater.get_count('num_updated')
update.num_items = updater.total
update.num_not_updated = updater.get_count('num_not_updated')
update.duration = float(time.time()) - update.start
update.save()
``` |
{
"source": "jobscry/vz-blog",
"score": 2
} |
#### File: vz-blog/posts/admin.py
```python
from django.contrib import admin
from posts.models import Post
from posts.forms import PostAdminForm
class PostAdmin(admin.ModelAdmin):
class Media:
css = {
'all': (
"js/markitup/skins/markitup/style.css",
"js/markitup/sets/markdown/style.css"
)
}
js = (
"js/jquery.js",
"js/markitup/jquery.markitup.pack.js",
"js/markitup/sets/markdown/set.js"
"js/markitup.js"
)
form = PostAdminForm
list_display = ('title', 'author', 'is_published', 'published_on', 'created_on')
prepopulated_fields = {"slug": ("title",)}
list_filter = ('author', 'is_published')
search_fields = ['title', 'body', 'is_published',]
exclude = ('author', )
date_hierarchy = 'published_on'
save_on_top = True
fieldsets = (
(None, {
'fields': ('title', 'slug', 'tags', 'body')
}),
('Publishing Options', {
'fields': ('is_published', 'published_on', 'update_pingbacks')
}),
)
def save_model(self, request, obj, form, change):
obj.author = request.user
obj.save()
admin.site.register(Post, PostAdmin)
```
#### File: vz-blog/utils/pingback.py
```python
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2003-01-26"
__version__ = "2003.01.26.01"
import re
import urllib2
import xmlrpclib
from HTMLParser import HTMLParser
def _reSTLinks(txt):
reSTLink = re.compile("\n\\.\\.\\s+[^\n:]+:\s+(http://[^\n]+)", re.I)
linkMatches = reSTLink.findall(txt)
return linkMatches
class _LinkExtractor(HTMLParser, object):
def __init__(self, links):
super(_LinkExtractor, self).__init__()
self.links = links
def handle_starttag(self, tag, attrs):
if tag == "a":
for key, value in attrs:
if key == "href" and value.startswith("http://"):
self.links.append(value)
def _htmlLinks(txt):
links = []
le = _LinkExtractor(links)
le.feed(txt)
le.close()
return links
def _htmlPingbackURI(fileObj):
"""Given an interable object returning text, search it for a pingback URI
based upon the search parameters given by the pingback specification.
Namely, it should match the regex:
<link rel="pingback" href="([^"]+)" ?/?>
(source: http://www.hixie.ch/specs/pingback/pingback)
We could search the text using an actual HTML parser easily enough, or
expand the regex to be a little more forgiving, but for the moment we'll
follow the spec."""
regex = re.compile('<link rel="pingback" href="([^"]+)" ?/?>', re.I) # might as well be case-insensitive
for line in fileObj:
m = regex.search(line)
if m != None:
uri = m.group(1)
# The pingback spec demands we expand the four allowed entities,
# but no more.
uri = uri.replace("<", "<")
uri = uri.replace(">", ">")
uri = uri.replace(""", '"')
uri = uri.replace("&", "&")
return uri
return None
def _determinePingbackURI(uri):
"""Attempt to determine the pingback URI of the given url object. First
we try to find the X-Pingback server header, and then we resort to
messier means if necessary. See _htmlPingbackURI for those means."""
try:
pingbackURI = uri.info()['X-Pingback']
return pingbackURI
except KeyError:
return _htmlPingbackURI(uri)
def _performXMLRPCQuery(sourceURI, targetURI, serverURI):
rpcserver = xmlrpclib.ServerProxy(serverURI)
rpcserver.pingback.ping(sourceURI, targetURI)
def pingback(sourceURI, targetURI):
"""Attempts to notify the server of targetURI that sourceURI refers to
it."""
url = urllib2.urlopen(targetURI)
pingback = _determinePingbackURI(url)
if pingback == None:
return
_performXMLRPCQuery(sourceURI, targetURI, pingback)
def autoPingback(sourceURI, reST = None, HTML = None):
"""Scans the input text, which can be in either reStructuredText or HTML
format, pings every linked website for auto-discovery-capable pingback
servers, and does an appropriate pingback.
The following specification details how this code should work:
http://www.hixie.ch/specs/pingback/pingback"""
assert reST != None or HTML != None
if reST != None:
links = _reSTLinks(reST)
else:
links = _htmlLinks(HTML)
for link in links:
pingback(sourceURI = sourceURI, targetURI = link)
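# Usage sketch (illustrative only; the URL and variable are made up): after publishing
# a post, ping every site linked from its rendered HTML body.
#
#     autoPingback("http://example.com/blog/my-post/", HTML=post_body_html)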
``` |
{
"source": "jobsfan/pytorch",
"score": 3
} |
#### File: pytorch/liuer/12-5.py
```python
import torch
num_class = 4
input_size = 4
hidden_size = 8
embedding_size = 10
num_layers = 2
batch_size = 1
seq_len = 5
idx2char = ['e', 'h', 'l', 'o']
x_data = [[1, 0, 2, 2, 3]] # (batch, seq_len)
y_data = [3, 1, 2, 3, 2] # (batch * seq_len)
inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.emb = torch.nn.Embedding(input_size, embedding_size)
self.rnn = torch.nn.RNN(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True)
self.fc = torch.nn.Linear(hidden_size, num_class)
def forward(self, x):
hidden = torch.zeros(num_layers, x.size(0), hidden_size)
x = self.emb(x) # (batch, seqlen, embeddingsize)
x, _ = self.rnn(x, hidden)
x = self.fc(x)
return x.view(-1, num_class)
net = Model()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)
for epoch in range(15):
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
_, idx = outputs.max(dim=1)
idx = idx.data.numpy()
print('Predicted: ', ''.join([idx2char[x] for x in idx]), end='')
print(', Epoch [%d/15] loss = %.3f' % (epoch+1, loss.item()))
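# Note: y_data = [3, 1, 2, 3, 2] maps through idx2char to the target string "ohlol",
# so the "Predicted:" output is expected to converge to "ohlol" within the 15 epochs
# (the exact epoch varies with random initialization).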
```
#### File: pytorch/liuer/8.py
```python
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
class DiabetesDataset(Dataset):
def __init__(self,filepath):
xy = np.loadtxt(filepath,delimiter=',',dtype=np.float32)
self.len = xy.shape[0]
self.x_data = torch.from_numpy(xy[:,:-1])
self.y_data = torch.from_numpy(xy[:,[-1]])
def __getitem__(self, index):
return self.x_data[index],self.y_data[index]
def __len__(self):
return self.len
dataset = DiabetesDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset,batch_size=32,shuffle=True,num_workers=2)
class Model(torch.nn.Module):
def __init__(self):
super(Model,self).__init__()
self.linear1 = torch.nn.Linear(10,8)
self.linear2 = torch.nn.Linear(8,6)
self.linear3 = torch.nn.Linear(6,4)
self.linear4 = torch.nn.Linear(4,1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self,x):
x = self.sigmoid(self.linear1(x))
x = self.sigmoid(self.linear2(x))
x = self.sigmoid(self.linear3(x))
x = self.sigmoid(self.linear4(x))
return x
model = Model()
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
for epoch in range(100):
for i,data in enumerate(train_loader,0):
inputs,labels = data
y_pred = model(inputs)
loss = criterion(y_pred,labels)
print(epoch,i,loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
```
#### File: pytorch/liuer/9-2.py
```python
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size = 64
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),(0.3081,))
])
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True,download=True,transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True,batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/',train=False,download=True,transform=transform)
test_loader = DataLoader(test_dataset,shuffle=False,batch_size=batch_size)
class Net(torch.nn.Module):
def __init__(self):
super(Net,self).__init__()
self.l1 = torch.nn.Linear(784,512)
self.l2 = torch.nn.Linear(512,256)
self.l3 = torch.nn.Linear(256,128)
self.l4 = torch.nn.Linear(128,64)
self.l5 = torch.nn.Linear(64,10)
def forward(self,x):
x = x.view(-1,784)
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = F.relu(self.l3(x))
x = F.relu(self.l4(x))
return self.l5(x)
model = Net()
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),lr=0.01,momentum=0.5)
def train(epoch):
running_loss = 0.0
for batch_idx, data in enumerate(train_loader,0):
inputs, target = data
optimizer.zero_grad()
# forward + backward + update
outputs = model(inputs)
loss = criterion(outputs,target)
loss.backward()
optimizer.step()
running_loss += loss.item()
if batch_idx % 300 == 299:
print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
running_loss = 0.0
def test():
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data
outputs = model(images)
_, predicted = torch.max(outputs.data,dim=1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy on test set: %d %%' % (100 * correct / total))
if __name__ == '__main__':
for epoch in range(10):
train(epoch)
test()
``` |
{
"source": "jobsifpb/jobs",
"score": 2
} |
#### File: modules/board/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
class Job(models.Model):
'''
    Model that represents the registered job postings
'''
title = models.CharField(max_length=255)
description = models.TextField()
salary_min = models.DecimalField(max_digits=10, decimal_places=2)
salary_max = models.DecimalField(max_digits=10, decimal_places=2)
    workload = models.PositiveIntegerField(validators=[MinValueValidator(0), MaxValueValidator(45)])
validity = models.DateTimeField()
open_job = models.BooleanField(default=True)
internal_job = models.BooleanField(default=False)
author = models.ForeignKey(User)
featured = models.BooleanField(default=False)
class Meta:
verbose_name = 'Vaga'
verbose_name_plural = 'Vagas'
def __str__(self):
return self.title
```
#### File: board/tests/tests.py
```python
from django.test import TestCase
from django.test import Client
from django.core.urlresolvers import reverse
from model_mommy import mommy
from modules.board.models import Job
class HomeTest(TestCase):
'''
Tests of board home view
'''
def setUp(self):
self.job = mommy.make(Job, title='Vaga de estágio')
self.response_home = self.client.get(reverse('home'))
self.cliente = Client()
def test_home_response_200(self):
self.assertEqual(self.response_home.status_code, 200)
def test_home_context_with_one_job(self):
self.assertEqual(len(self.response_home.context['jobs']), 1)
class JobTest(TestCase):
'''
Tests of board models
'''
def setUp(self):
self.job = mommy.make(Job, title='Vaga de estágio')
def test_create_job(self):
self.assertEqual(self.job.__str__(), 'Vaga de estágio')
```
#### File: modules/board/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from .forms import JobForm
from .models import Job
def home(request):
'''
Home page of our board
    It shows a list of positions at IFPB or other companies
'''
user = request.user
jobs = Job.objects.all()
return render(request, 'home.html', locals())
``` |
{
"source": "jobsta/albumapp-flask",
"score": 2
} |
#### File: app/views/album.py
```python
import datetime
import json
from app.models import db
from app.models.utils import create_album_report_template, get_menu_items, json_default
from flask import abort, Blueprint, redirect, render_template, Response, request, url_for
from flask import current_app as app
from flask_babel import _
from sqlalchemy import func, select
album_bp = Blueprint('album', __name__, url_prefix='/album')
@album_bp.route('/data/')
def data():
"""Returns available albums from the database. Can be optionally filtered by year.
This is called from templates/album/index.html when the year input is changed.
"""
year = request.args.get('year')
if year:
try:
year = int(year)
except (ValueError, TypeError):
abort(400, 'invalid year parameter')
else:
year = None
return json.dumps(get_albums(year), default=json_default)
@album_bp.route('/edit/')
@album_bp.route('/edit/<int:album_id>')
def edit(album_id=None):
"""Shows an edit form to add new or edit an existing album."""
db_engine = db.get_db()
rv = dict()
rv['menu_items'] = get_menu_items('album')
if album_id:
album = db_engine.execute(
select([db.album]).where(db.album.c.id == album_id)).fetchone()
if not album:
            return redirect(url_for('album.index'))
rv['is_new'] = False
rv['album'] = json.dumps(dict(album))
else:
rv['is_new'] = True
rv['album'] = json.dumps(dict(id='', name='', year=None, best_of_compilation=False))
return render_template('album/edit.html', **rv)
@album_bp.route('/')
@album_bp.route('/index')
def index():
"""Shows a page where all available albums are listed."""
rv = dict()
rv['menu_items'] = get_menu_items('album')
rv['albums'] = json.dumps(get_albums(), default=json_default)
return render_template('album/index.html', **rv)
@album_bp.route('/report/')
def report():
"""Prints a pdf file with all available albums.
The albums can be optionally filtered by year. reportbro-lib is used to
generate the pdf file. The data itself is retrieved
from the database (*get_albums*). The report_definition
is also stored in the database and is created on-the-fly if not present (to make
this Demo App easier to use).
"""
from reportbro import Report, ReportBroError
year = request.args.get('year')
if year:
try:
year = int(year)
except (ValueError, TypeError):
abort(400, 'invalid year parameter')
else:
year = None
db_engine = db.get_db()
# NOTE: these params must match exactly with the parameters defined in the
# report definition in ReportBro Designer, check the name and type (Number, Date, List, ...)
# of those parameters in the Designer.
params = dict(year=year, albums=get_albums(year), current_date=datetime.datetime.now())
report_count = db_engine.execute(
select([func.count(db.report_definition.c.id)]).\
where(db.report_definition.c.report_type == 'albums_report')).scalar()
if report_count == 0:
create_album_report_template()
report_definition = db_engine.execute(
select([db.report_definition.c.id, db.report_definition.c.report_definition]).\
where(db.report_definition.c.report_type == 'albums_report')).fetchone()
if not report_definition:
        abort(500, 'no report_definition available')
try:
report_inst = Report(report_definition['report_definition'], params)
if report_inst.errors:
# report definition should never contain any errors,
# unless you saved an invalid report and didn't test in ReportBro Designer
raise ReportBroError(report_inst.errors[0])
pdf_report = report_inst.generate_pdf()
response = Response()
response.headers['Content-Type'] = 'application/pdf'
response.headers['Content-Disposition'] = 'inline; filename="albums.pdf"'
response.set_data(pdf_report)
return response
except ReportBroError as ex:
app.logger.error(ex.error)
abort(500, 'report error: ' + str(ex.error))
except Exception as ex:
abort(500, 'report exception: ' + str(ex))
@album_bp.route('/save', methods=['POST'])
def save():
"""Saves a music album in the db."""
db_engine = db.get_db()
json_data = request.json
if json_data is None:
abort(400, 'invalid request values')
album = json_data.get('album')
if not isinstance(album, dict):
abort(400, 'invalid values')
album_id = None
if album.get('id'):
try:
album_id = int(album.get('id'))
except (ValueError, TypeError):
abort(400, 'invalid album id')
values = dict(best_of_compilation=album.get('best_of_compilation'))
rv = dict(errors=[])
# perform some basic form validation
if not album.get('name'):
rv['errors'].append(dict(field='name', msg=str(_('error.the field must not be empty'))))
else:
values['name'] = album.get('name')
if not album.get('artist'):
rv['errors'].append(dict(field='artist', msg=str(_('error.the field must not be empty'))))
else:
values['artist'] = album.get('artist')
if album.get('year'):
try:
values['year'] = int(album.get('year'))
if values['year'] < 1900 or values['year'] > 2100:
rv['errors'].append(dict(field='year', msg=str(_('error.the field must contain a valid year'))))
except (ValueError, TypeError):
rv['errors'].append(dict(field='year', msg=str(_('error.the field must contain a number'))))
else:
values['year'] = None
if not rv['errors']:
# no validation errors -> save album
if album_id:
db_engine.execute(
db.album.update().where(db.album.c.id == album_id).values(**values))
else:
db_engine.execute(db.album.insert(), **values)
return json.dumps(rv)
def get_albums(year=None):
"""Returns available albums from the database. Can be optionally filtered by year."""
db_engine = db.get_db()
select_albums = select([db.album])
if year is not None:
select_albums = select_albums.where(db.album.c.year == year)
items = db_engine.execute(select_albums).fetchall()
# convert list of RowProxy's to list of dict items
return [dict(item) for item in items]
```
#### File: app/views/home.py
```python
from flask import Blueprint, redirect, url_for
home_bp = Blueprint('home', __name__, url_prefix='/home')
@home_bp.route('/')
@home_bp.route('/index')
def index():
return redirect(url_for('album.index'))
``` |
{
"source": "jobsta/albumapp-web2py",
"score": 3
} |
#### File: albumapp-web2py/controllers/album.py
```python
def data():
"""Returns available albums from the database. Can be optionally filtered by year.
This is called from views/album/index.html when the year input is changed.
"""
year = None
if request.vars.year:
try:
year = int(request.vars.year)
except (ValueError, TypeError):
raise HTTP(400, 'invalid year parameter')
return json.dumps(get_albums(year), default=json_default)
def edit():
"""Shows an edit form to add new or edit an existing album."""
rv = dict()
rv['menu_items'] = get_menu_items('album')
if request.args(0):
try:
album_id = int(request.args(0))
except (ValueError, TypeError):
raise HTTP(400, 'invalid argument')
album = db(db.album.id == album_id).select(db.album.ALL).first()
if not album:
redirect(URL('album', 'index'))
rv['is_new'] = False
rv['album'] = json.dumps(album.as_dict())
else:
rv['is_new'] = True
rv['album'] = json.dumps(dict(id='', name='', year=None, best_of_compilation=False))
return rv
def index():
"""Shows a page where all available albums are listed."""
rv = dict()
rv['menu_items'] = get_menu_items('album')
rv['albums'] = json.dumps(get_albums(), default=json_default)
return rv
def report():
"""Prints a pdf file with all available albums.
The albums can be optionally filtered by year. reportbro-lib is used to
generate the pdf file. The data itself is retrieved
from the database (*get_albums*). The report_definition
is also stored in the database and is created on-the-fly if not present (to make
this Demo App easier to use).
"""
from reportbro import Report, ReportBroError
year = None
if request.vars.year:
try:
year = int(request.vars.year)
except (ValueError, TypeError):
raise HTTP(400, 'invalid year parameter')
# NOTE: these params must match exactly with the parameters defined in the
# report definition in ReportBro Designer, check the name and type (Number, Date, List, ...)
# of those parameters in the Designer.
params = dict(year=year, albums=get_albums(year), current_date=request.now)
if db(db.report_definition.report_type == 'albums_report').count() == 0:
create_album_report_template()
report_definition = db(db.report_definition.report_type == 'albums_report').select(
db.report_definition.id, db.report_definition.report_definition).first()
if not report_definition:
raise HTTP(500, 'no report_definition available')
try:
report = Report(report_definition.report_definition, params)
if report.errors:
# report definition should never contain any errors,
# unless you saved an invalid report and didn't test in ReportBro Designer
raise ReportBroError(report.errors[0])
pdf_report = report.generate_pdf()
response.headers['Content-Type'] = 'application/pdf'
response.headers['Content-Disposition'] = 'inline; filename="albums.pdf"'
return pdf_report
except ReportBroError as ex:
raise HTTP(500, 'report error: ' + str(ex.error))
except Exception as ex:
raise HTTP(500, 'report exception: ' + str(ex))
def save():
"""Saves a music album in the db."""
album = request.vars.album
if not isinstance(album, dict):
raise HTTP(400, 'invalid values')
album_id = None
if album.get('id'):
try:
album_id = int(album.get('id'))
except (ValueError, TypeError):
raise HTTP(400, 'invalid album id')
values = dict(best_of_compilation=album.get('best_of_compilation'))
rv = dict(errors=[])
# perform some basic form validation
if not album.get('name'):
rv['errors'].append(dict(field='name', msg=str(T('error.the field must not be empty'))))
else:
values['name'] = album.get('name')
if not album.get('artist'):
rv['errors'].append(dict(field='artist', msg=str(T('error.the field must not be empty'))))
else:
values['artist'] = album.get('artist')
if album.get('year'):
try:
values['year'] = int(album.get('year'))
if values['year'] < 1900 or values['year'] > 2100:
rv['errors'].append(dict(field='year', msg=str(T('error.the field must contain a valid year'))))
except (ValueError, TypeError):
rv['errors'].append(dict(field='year', msg=str(T('error.the field must contain a number'))))
else:
values['year'] = None
if not rv['errors']:
# no validation errors -> save album
if album_id:
db(db.album.id == album_id).update(**values)
else:
db.album.insert(**values)
return json.dumps(rv)
def get_albums(year=None):
"""Returns available albums from the database. Can be optionally filtered by year.
This function is not callable from web request (because function has a parameter),
only used within this controller.
"""
filter_expr = (db.album.id > 0) if year is None else (db.album.year == year)
return db(filter_expr).select(db.album.ALL, orderby=db.album.name).as_list()
``` |
{
"source": "jobsweeper/pysweeper",
"score": 2
} |
#### File: pysweeper/tests/test_cli.py
```python
import unittest
import os
import subprocess
from scisweeper.scisweeper import SciSweeperJob
file_location = os.path.dirname(os.path.abspath(__file__))
class BashSciSweeper(SciSweeperJob):
@property
def executable(self):
return "bash " + os.path.join(file_location, "executable", "test.sh")
@staticmethod
def write_input(input_dict, working_directory="."):
import os
from jinja2 import Template
template = Template("{{value_1}} {{value_2}} {{value_3}}")
template_str = template.render(
value_1=input_dict["value_1"],
value_2=input_dict["value_2"],
value_3=input_dict["value_3"],
)
with open(os.path.join(working_directory, "input_file"), "w") as f:
f.writelines(template_str)
@staticmethod
def collect_output(working_directory="."):
import os
with open(os.path.join(working_directory, "output.log"), "r") as f:
output = f.readlines()
return {"result": [int(o) for o in output]}
class TestSciSweeperCli(unittest.TestCase):
def test_cli_run(self):
os.makedirs("calc_test_cli")
self.path_job = os.path.join(file_location, "calc_test_cli", "job")
self.job = BashSciSweeper(
working_directory=self.path_job,
input_dict={"value_1": 1, "value_2": 2, "value_3": 3},
)
self.job.to_hdf()
subprocess.check_output(
"python -m scisweeper.cli -p " + self.path_job,
cwd=file_location,
shell=True,
universal_newlines=True,
)
self.job.from_hdf()
self.assertEqual(self.job.output_dict["result"][0], 7)
self.assertEqual(self.job.output_dict["result"][1], 1)
os.remove(os.path.join(file_location, "calc_test_cli", "job", "input_file"))
os.remove(os.path.join(file_location, "calc_test_cli", "job", "output.log"))
os.remove(os.path.join(file_location, "calc_test_cli", "job", "scisweeper.h5"))
os.removedirs(os.path.join(file_location, "calc_test_cli", "job"))
def test_bash_sci_sweeper(self):
self.path_job = os.path.join(file_location, "calc_test_job", "job")
self.job = BashSciSweeper(
working_directory=self.path_job,
input_dict={"value_1": 1, "value_2": 2, "value_3": 3},
)
self.job.run()
self.assertEqual(self.job.output_dict["result"][0], 7)
self.assertEqual(self.job.output_dict["result"][1], 1)
os.remove(os.path.join(file_location, "calc_test_job", "job", "input_file"))
os.remove(os.path.join(file_location, "calc_test_job", "job", "output.log"))
os.remove(os.path.join(file_location, "calc_test_job", "job", "scisweeper.h5"))
os.removedirs(os.path.join(file_location, "calc_test_job", "job"))
def test_error(self):
out = subprocess.check_output(
"python -m scisweeper.cli -x",
cwd=file_location,
shell=True,
universal_newlines=True,
)
self.assertIn("cli.py --p <path>", out)
def test_help(self):
out = subprocess.check_output(
"python -m scisweeper.cli -h",
cwd=file_location,
shell=True,
universal_newlines=True,
)
self.assertIn("cli.py --p <path>", out)
``` |
{
"source": "jobtalle/AtlasExtruder",
"score": 3
} |
#### File: jobtalle/AtlasExtruder/atlas_extrude.py
```python
import json
from PIL import Image
def extrude(image, sprites):
im = Image.open(image)
pixels = im.load()
atlas = json.load(open(sprites, "r"))
for frame in atlas["frames"]:
x = atlas["frames"][frame]["frame"]["x"]
y = atlas["frames"][frame]["frame"]["y"]
width = atlas["frames"][frame]["sourceSize"]["w"]
height = atlas["frames"][frame]["sourceSize"]["h"]
pixels[x - 1, y - 1] = pixels[x, y]
pixels[x - 1, y + height] = pixels[x, y + height - 1]
pixels[x + width, y - 1] = pixels[x + width - 1, y]
pixels[x + width, y + height] = pixels[x + width - 1, y + height - 1]
for y in range(y, y + height):
pixels[x - 1, y] = pixels[x, y]
pixels[x + width, y] = pixels[x + width - 1, y]
for x in range(x, x + width):
pixels[x, y + 1] = pixels[x, y]
pixels[x, y - height] = pixels[x, y - height + 1]
im.save(image)
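# Usage sketch (file names are assumptions): `sprites` is expected to be a
# TexturePacker-style JSON hash whose "frames" entries carry "frame" {x, y} and
# "sourceSize" {w, h}, matching the keys read above.
#
#     extrude("atlas.png", "atlas.json")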
``` |