<|file_name|>test_certificates.py<|end_file_name|><|fim▁begin|>#-*- coding: utf-8 -*- """ Group Configuration Tests. """ import json import mock import ddt from django.conf import settings from django.test.utils import override_settings from opaque_keys.edx.keys import AssetKey from opaque_keys.edx.locations import AssetLocation from contentstore.utils import reverse_course_url from contentstore.views.certificates import CERTIFICATE_SCHEMA_VERSION from contentstore.tests.utils import CourseTestCase from xmodule.contentstore.django import contentstore from xmodule.contentstore.content import StaticContent from xmodule.exceptions import NotFoundError from student.models import CourseEnrollment from student.roles import CourseInstructorRole, CourseStaffRole from student.tests.factories import UserFactory from course_modes.tests.factories import CourseModeFactory from contentstore.views.certificates import CertificateManager from django.test.utils import override_settings from contentstore.utils import get_lms_link_for_certificate_web_view from util.testing import EventTestMixin FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy() FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True CERTIFICATE_JSON = { u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'version': CERTIFICATE_SCHEMA_VERSION, } CERTIFICATE_JSON_WITH_SIGNATORIES = { u'name': u'Test certificate', u'description': u'Test description', u'version': CERTIFICATE_SCHEMA_VERSION, u'course_title': 'Course Title Override', u'is_active': True, u'signatories': [ { "name": "Bob Smith", "title": "The DEAN.", "signature_image_path": "/c4x/test/CSS101/asset/Signature.png" } ] } # pylint: disable=no-member class HelperMethods(object): """ Mixin that provides useful methods for certificate configuration tests. """ def _create_fake_images(self, asset_keys): """ Creates fake image files for a list of asset_keys. """ for asset_key_string in asset_keys: asset_key = AssetKey.from_string(asset_key_string) content = StaticContent( asset_key, "Fake asset", "image/png", "data", ) contentstore().save(content) def _add_course_certificates(self, count=1, signatory_count=0, is_active=False): """ Create certificate for the course. """ signatories = [ { 'name': 'Name ' + str(i), 'title': 'Title ' + str(i), 'signature_image_path': '/c4x/test/CSS101/asset/Signature{}.png'.format(i), 'id': i } for i in xrange(0, signatory_count) ] # create images for signatory signatures except the last signatory for idx, signatory in enumerate(signatories): if len(signatories) > 2 and idx == len(signatories) - 1: continue else: self._create_fake_images([signatory['signature_image_path']]) certificates = [ { 'id': i, 'name': 'Name ' + str(i), 'description': 'Description ' + str(i), 'signatories': signatories, 'version': CERTIFICATE_SCHEMA_VERSION, 'is_active': is_active } for i in xrange(0, count) ] self.course.certificates = {'certificates': certificates} self.save_course() # pylint: disable=no-member class CertificatesBaseTestCase(object): """ Mixin with base test cases for the certificates. """ def _remove_ids(self, content): """ Remove ids from the response. We cannot predict IDs, because they're generated randomly. We use this method to clean up response when creating new certificate. """ certificate_id = content.pop("id") return certificate_id def test_required_fields_are_absent(self): """ Test required fields are absent. 
""" bad_jsons = [ # must have name of the certificate { u'description': 'Test description', u'version': CERTIFICATE_SCHEMA_VERSION }, # an empty json {}, ] for bad_json in bad_jsons: response = self.client.post( self._url(), data=json.dumps(bad_json), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) def test_invalid_json(self): """ Test invalid json handling. """ # Invalid JSON. invalid_json = "{u'name': 'Test Name', u'description': 'Test description'," \ " u'version': " + str(CERTIFICATE_SCHEMA_VERSION) + ", []}" response = self.client.post( self._url(), data=invalid_json, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) def test_certificate_data_validation(self): #Test certificate schema version json_data_1 = { u'version': 100, u'name': u'Test certificate', u'description': u'Test description' } with self.assertRaises(Exception) as context: CertificateManager.validate(json_data_1) self.assertTrue("Unsupported certificate schema version: 100. Expected version: 1." in context.exception) #Test certificate name is missing json_data_2 = { u'version': CERTIFICATE_SCHEMA_VERSION, u'description': u'Test description' } with self.assertRaises(Exception) as context: CertificateManager.validate(json_data_2) self.assertTrue('must have name of the certificate' in context.exception) @ddt.ddt @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) class CertificatesListHandlerTestCase(EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods): """ Test cases for certificates_list_handler. """ def setUp(self): """ Set up CertificatesListHandlerTestCase. """ super(CertificatesListHandlerTestCase, self).setUp('contentstore.views.certificates.tracker') def _url(self): """ Return url for the handler. """ return reverse_course_url('certificates.certificates_list_handler', self.course.id) def test_can_create_certificate(self): """ Test that you can create a certificate. 
""" expected = { u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'signatories': [] } response = self.client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 201) self.assertIn("Location", response) content = json.loads(response.content) certificate_id = self._remove_ids(content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.created', course_id=unicode(self.course.id), configuration_id=certificate_id, ) def test_cannot_create_certificate_if_user_has_no_write_permissions(self): """ Tests user without write permissions on course should not able to create certificate """ user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 403) @override_settings(LMS_BASE=None) def test_no_lms_base_for_certificate_web_view_link(self): test_link = get_lms_link_for_certificate_web_view( user_id=self.user.id, course_key=self.course.id, mode='honor' ) self.assertEquals(test_link, None) @override_settings(LMS_BASE="lms_base_url") def test_lms_link_for_certificate_web_view(self): test_url = "//lms_base_url/certificates/user/" \ + str(self.user.id) + "/course/" + unicode(self.course.id) + '?preview=honor' link = get_lms_link_for_certificate_web_view( user_id=self.user.id, course_key=self.course.id, mode='honor' ) self.assertEquals(link, test_url) @mock.patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True}) def test_certificate_info_in_response(self): """ Test that certificate has been created and rendered properly. """ response = self.client.ajax_post( self._url(),<|fim▁hole|> ) self.assertEqual(response.status_code, 201) # in html response result = self.client.get_html(self._url()) self.assertIn('Test certificate', result.content) self.assertIn('Test description', result.content) # in JSON response response = self.client.get_json(self._url()) data = json.loads(response.content) self.assertEquals(len(data), 1) self.assertEqual(data[0]['name'], 'Test certificate') self.assertEqual(data[0]['description'], 'Test description') self.assertEqual(data[0]['version'], CERTIFICATE_SCHEMA_VERSION) def test_unsupported_http_accept_header(self): """ Test if not allowed header present in request. """ response = self.client.get( self._url(), HTTP_ACCEPT="text/plain", ) self.assertEqual(response.status_code, 406) def test_certificate_unsupported_method(self): """ Unit Test: test_certificate_unsupported_method """ resp = self.client.put(self._url()) self.assertEqual(resp.status_code, 405) def test_not_permitted(self): """ Test that when user has not read access to course then permission denied exception should raised. """ test_user_client, test_user = self.create_non_staff_authed_user_client() CourseEnrollment.enroll(test_user, self.course.id) response = test_user_client.ajax_post( self._url(), data=CERTIFICATE_JSON ) self.assertEqual(response.status_code, 403) self.assertIn("error", response.content) def test_audit_course_mode_is_skipped(self): """ Tests audit course mode is skipped when rendering certificates page. 
""" CourseModeFactory.create(course_id=self.course.id) CourseModeFactory.create(course_id=self.course.id, mode_slug='verified') response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertContains(response, 'verified') self.assertNotContains(response, 'audit') def test_audit_only_disables_cert(self): """ Tests audit course mode is skipped when rendering certificates page. """ CourseModeFactory.create(course_id=self.course.id, mode_slug='audit') response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertContains(response, 'This course does not use a mode that offers certificates.') self.assertNotContains(response, 'This module is not enabled.') self.assertNotContains(response, 'Loading') @ddt.data( ['audit', 'verified'], ['verified'], ['audit', 'verified', 'credit'], ['verified', 'credit'], ['professional'] ) def test_non_audit_enables_cert(self, slugs): """ Tests audit course mode is skipped when rendering certificates page. """ for slug in slugs: CourseModeFactory.create(course_id=self.course.id, mode_slug=slug) response = self.client.get_html( self._url(), ) self.assertEqual(response.status_code, 200) self.assertNotContains(response, 'This course does not use a mode that offers certificates.') self.assertNotContains(response, 'This module is not enabled.') self.assertContains(response, 'Loading') def test_assign_unique_identifier_to_certificates(self): """ Test certificates have unique ids """ self._add_course_certificates(count=2) json_data = { u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'signatories': [] } response = self.client.post( self._url(), data=json.dumps(json_data), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) new_certificate = json.loads(response.content) for prev_certificate in self.course.certificates['certificates']: self.assertNotEqual(new_certificate.get('id'), prev_certificate.get('id')) @ddt.ddt @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) class CertificatesDetailHandlerTestCase(EventTestMixin, CourseTestCase, CertificatesBaseTestCase, HelperMethods): """ Test cases for CertificatesDetailHandlerTestCase. """ _id = 0 def setUp(self): # pylint: disable=arguments-differ """ Set up CertificatesDetailHandlerTestCase. """ super(CertificatesDetailHandlerTestCase, self).setUp('contentstore.views.certificates.tracker') def _url(self, cid=-1): """ Return url for the handler. """ cid = cid if cid > 0 else self._id return reverse_course_url( 'certificates.certificates_detail_handler', self.course.id, kwargs={'certificate_id': cid}, ) def test_can_create_new_certificate_if_it_does_not_exist(self): """ PUT/POST new certificate. """ expected = { u'id': 666, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'Test certificate', u'description': u'Test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.put( self._url(cid=666), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.created', course_id=unicode(self.course.id), configuration_id=666, ) def test_can_edit_certificate(self): """ Edit certificate, check its id and modified fields. 
""" self._add_course_certificates(count=2) expected = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.put( self._url(cid=1), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.assert_event_emitted( 'edx.certificate.configuration.modified', course_id=unicode(self.course.id), configuration_id=1, ) self.reload_course() # Verify that certificate is properly updated in the course. course_certificates = self.course.certificates['certificates'] self.assertEqual(len(course_certificates), 2) self.assertEqual(course_certificates[1].get('name'), u'New test certificate') self.assertEqual(course_certificates[1].get('description'), 'New test description') def test_can_edit_certificate_without_is_active(self): """ Tests user should be able to edit certificate, if is_active attribute is not present for given certificate. Old courses might not have is_active attribute in certificate data. """ certificates = [ { 'id': 1, 'name': 'certificate with is_active', 'description': 'Description ', 'signatories': [], 'version': CERTIFICATE_SCHEMA_VERSION, } ] self.course.certificates = {'certificates': certificates} self.save_course() expected = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'is_active': True, u'course_title': u'Course Title Override', u'signatories': [] } response = self.client.post( self._url(cid=1), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 201) content = json.loads(response.content) self.assertEqual(content, expected) def test_can_delete_certificate_with_signatories(self): """ Delete certificate """ self._add_course_certificates(count=2, signatory_count=1) response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.assert_event_emitted( 'edx.certificate.configuration.deleted', course_id=unicode(self.course.id), configuration_id='1', ) self.reload_course() # Verify that certificates are properly updated in the course. certificates = self.course.certificates['certificates'] self.assertEqual(len(certificates), 1) self.assertEqual(certificates[0].get('name'), 'Name 0') self.assertEqual(certificates[0].get('description'), 'Description 0') def test_delete_certificate_without_write_permissions(self): """ Tests certificate deletion without write permission on course. """ self._add_course_certificates(count=2, signatory_count=1) user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_delete_certificate_without_global_staff_permissions(self): """ Tests deletion of an active certificate without global staff permission on course. 
""" self._add_course_certificates(count=2, signatory_count=1, is_active=True) user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.delete( self._url(cid=1), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_update_active_certificate_without_global_staff_permissions(self): """ Tests update of an active certificate without global staff permission on course. """ self._add_course_certificates(count=2, signatory_count=1, is_active=True) cert_data = { u'id': 1, u'version': CERTIFICATE_SCHEMA_VERSION, u'name': u'New test certificate', u'description': u'New test description', u'course_title': u'Course Title Override', u'org_logo_path': '', u'is_active': False, u'signatories': [] } user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.put( self._url(cid=1), data=json.dumps(cert_data), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 403) def test_delete_non_existing_certificate(self): """ Try to delete a non existing certificate. It should return status code 404 Not found. """ self._add_course_certificates(count=2) response = self.client.delete( self._url(cid=100), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) def test_can_delete_signatory(self): """ Delete an existing certificate signatory """ self._add_course_certificates(count=2, signatory_count=3) certificates = self.course.certificates['certificates'] signatory = certificates[1].get("signatories")[1] image_asset_location = AssetLocation.from_deprecated_string(signatory['signature_image_path']) content = contentstore().find(image_asset_location) self.assertIsNotNone(content) test_url = '{}/signatories/1'.format(self._url(cid=1)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.reload_course() # Verify that certificates are properly updated in the course. certificates = self.course.certificates['certificates'] self.assertEqual(len(certificates[1].get("signatories")), 2) # make sure signatory signature image is deleted too self.assertRaises(NotFoundError, contentstore().find, image_asset_location) def test_deleting_signatory_without_signature(self): """ Delete an signatory whose signature image is already removed or does not exist """ self._add_course_certificates(count=2, signatory_count=4) test_url = '{}/signatories/3'.format(self._url(cid=1)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) def test_delete_signatory_non_existing_certificate(self): """ Try to delete a non existing certificate signatory. It should return status code 404 Not found. 
""" self._add_course_certificates(count=2) test_url = '{}/signatories/1'.format(self._url(cid=100)) response = self.client.delete( test_url, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) def test_certificate_activation_success(self): """ Activate and Deactivate the course certificate """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) is_active = True for i in range(2): if i == 1: is_active = not is_active response = self.client.post( test_url, data=json.dumps({"is_active": is_active}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 200) course = self.store.get_course(self.course.id) certificates = course.certificates['certificates'] self.assertEqual(certificates[0].get('is_active'), is_active) cert_event_type = 'activated' if is_active else 'deactivated' self.assert_event_emitted( '.'.join(['edx.certificate.configuration', cert_event_type]), course_id=unicode(self.course.id), ) @ddt.data(True, False) def test_certificate_activation_without_write_permissions(self, activate): """ Tests certificate Activate and Deactivate should not be allowed if user does not have write permissions on course. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) user = UserFactory() self.client.login(username=user.username, password='test') response = self.client.post( test_url, data=json.dumps({"is_active": activate}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 403) @ddt.data(True, False) def test_certificate_activation_without_global_staff_permissions(self, activate): """ Tests certificate Activate and Deactivate should not be allowed if user does not have global staff permissions on course. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) self._add_course_certificates(count=1, signatory_count=2) user = UserFactory() for role in [CourseInstructorRole, CourseStaffRole]: role(self.course.id).add_users(user) self.client.login(username=user.username, password='test') response = self.client.post( test_url, data=json.dumps({"is_active": activate}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEquals(response.status_code, 403) def test_certificate_activation_failure(self): """ Certificate activation should fail when user has not read access to course then permission denied exception should raised. """ test_url = reverse_course_url('certificates.certificate_activation_handler', self.course.id) test_user_client, test_user = self.create_non_staff_authed_user_client() CourseEnrollment.enroll(test_user, self.course.id) self._add_course_certificates(count=1, signatory_count=2) response = test_user_client.post( test_url, data=json.dumps({"is_active": True}), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEquals(response.status_code, 403) course = self.store.get_course(self.course.id) certificates = course.certificates['certificates'] self.assertEqual(certificates[0].get('is_active'), False)<|fim▁end|>
data=CERTIFICATE_JSON_WITH_SIGNATORIES
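
For reference, the certificate payload these tests exchange with the handlers can be summarized as a type. The sketch below is inferred from CERTIFICATE_JSON_WITH_SIGNATORIES, _add_course_certificates, and the validation tests in the file above; it is an inferred shape for illustration, not an official edx-platform schema.

// Inferred shape of a course certificate configuration (TypeScript).
interface Signatory {
  id?: number;                  // present on signatories stored with the course
  name: string;
  title: string;
  signature_image_path: string; // contentstore asset path, e.g. "/c4x/test/CSS101/asset/Signature.png"
}

interface CertificateConfig {
  id?: number;              // generated randomly server-side on create
  version: number;          // must equal CERTIFICATE_SCHEMA_VERSION (1); other values fail validation
  name: string;             // required; omitting it yields HTTP 400
  description: string;
  is_active?: boolean;      // older course data may omit this attribute
  course_title?: string;    // optional course title override
  signatories: Signatory[];
}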
<|file_name|>runningjobswidget.js<|end_file_name|><|fim▁begin|>function fetchData{{ id }}() { function onDataReceived(series) {<|fim▁hole|> series: { shadowSize: 0, }, bars: { show: true, barWidth: series.barWidth, align: "center" }, xaxis: { tickDecimals: 0, mode: "time", timezone: "browser", }, yaxis: { tickDecimals: 0, min: 0, }, }); $("#{{ id }}loading").hide(); }; $.ajax({ url: '{% url id %}', type: "GET", dataType: "json", success: onDataReceived, }); }; {% include 'pages/refreshbutton.js' %} $('#{{ id }}remove').click(function(){ var url = "{% url 'usersprofiledash' 'off' id %}"; $.ajax({ url: url, type: "GET", }); });<|fim▁end|>
data = [series]; $.plot("#{{ id }}", data, {
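
The widget above polls a JSON endpoint, wraps the returned object in an array for flot, and also reads a custom barWidth property from it. A hedged sketch of the response shape this implies (inferred from usage only; the server-side view is not shown here):

// Assumed JSON payload for the running-jobs widget (TypeScript).
interface RunningJobsSeries {
  data: Array<[number, number]>; // flot data points: [epoch milliseconds, running job count]
  barWidth: number;              // bar width in x-axis units (ms), passed to flot's bars.barWidth
}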
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// ignore-tidy-linelength #![feature(doc_cfg)] pub mod module { pub mod sub_module { pub mod sub_sub_module { pub fn foo() {} } pub fn bar() {} } pub fn whatever() {} } pub fn foobar() {} pub type Alias = u32; #[doc(cfg(feature = "foo-method"))] pub struct Foo { pub x: Alias, } impl Foo { /// Some documentation /// # A Heading pub fn a_method(&self) {} } pub trait Trait { type X; const Y: u32; fn foo() {} } impl Trait for Foo { type X = u32; const Y: u32 = 0; } impl implementors::Whatever for Foo { type Foo = u32; } pub mod sub_mod { /// ```txt<|fim▁hole|> /// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa /// ``` pub struct Foo; } pub mod long_trait { use std::ops::DerefMut; pub trait ALongNameBecauseItHelpsTestingTheCurrentProblem: DerefMut<Target = u32> + From<u128> + Send + Sync + AsRef<str> + 'static {} } pub mod long_table { /// | This::is::a::kinda::very::long::header::number::one | This::is::a::kinda::very::long::header::number::two | This::is::a::kinda::very::long::header::number::one | This::is::a::kinda::very::long::header::number::two | /// | ----------- | ----------- | ----------- | ----------- | /// | This::is::a::kinda::long::content::number::one | This::is::a::kinda::very::long::content::number::two | This::is::a::kinda::long::content::number::one | This::is::a::kinda::very::long::content::number::two | /// /// I wanna sqdkfnqds f dsqf qds f dsqf dsq f dsq f qds f qds f qds f dsqq f dsf sqdf dsq fds f dsq f dq f ds fq sd fqds f dsq f sqd fsq df sd fdsqfqsd fdsq f dsq f dsqfd s dfq pub struct Foo; /// | This::is::a::kinda::very::long::header::number::one | This::is::a::kinda::very::long::header::number::two | This::is::a::kinda::very::long::header::number::one | This::is::a::kinda::very::long::header::number::two | /// | ----------- | ----------- | ----------- | ----------- | /// | This::is::a::kinda::long::content::number::one | This::is::a::kinda::very::long::content::number::two | This::is::a::kinda::long::content::number::one | This::is::a::kinda::very::long::content::number::two | /// /// I wanna sqdkfnqds f dsqf qds f dsqf dsq f dsq f qds f qds f qds f dsqq f dsf sqdf dsq fds f dsq f dq f ds fq sd fqds f dsq f sqd fsq df sd fdsqfqsd fdsq f dsq f dsqfd s dfq impl Foo { pub fn foo(&self) {} } } pub mod summary_table { /// | header 1 | header 2 | /// | -------- | -------- | /// | content | content | pub struct Foo; } pub mod too_long { pub type ReallyLongTypeNameLongLongLong = Option<unsafe extern "C" fn(a: *const u8, b: *const u8) -> *const u8>; pub const ReallyLongTypeNameLongLongLongConstBecauseWhyNotAConstRightGigaGigaSupraLong: u32 = 0; pub struct SuperIncrediblyLongLongLongLongLongLongLongGigaGigaGigaMegaLongLongLongStructName { pub a: u32, } impl SuperIncrediblyLongLongLongLongLongLongLongGigaGigaGigaMegaLongLongLongStructName { /// ``` /// let x = SuperIncrediblyLongLongLongLongLongLongLongGigaGigaGigaMegaLongLongLongStructName { a: 0 }; /// ``` pub fn foo(&self) {} } }<|fim▁end|>
/// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa /// ``` /// /// ```
<|file_name|>app.js<|end_file_name|><|fim▁begin|>window.React = require('react'); var App = require('./components/App.react'); var css = require('./../css/app.css'); require('../img/social_media.png'); React.initializeTouchEvents(true); // Render the app component (js/components/App.react.js)<|fim▁hole|>);<|fim▁end|>
React.render( <App />, document.getElementById('app')
<|file_name|>ECPrivKey.java<|end_file_name|><|fim▁begin|>/** * jBorZoi - An Elliptic Curve Cryptography Library * * Copyright (C) 2003 Dragongate Technologies Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ package com.dragongate_technologies.borZoi; import java.math.BigInteger; import java.security.SecureRandom; /** * Elliptic Curve Private Keys consisting of two member variables: dp, * the EC domain parameters and s, the private key which must * be kept secret. * @author <a href="http://www.dragongate-technologies.com">Dragongate Technologies Ltd.</a> * @version 0.90 */ public class ECPrivKey {<|fim▁hole|> /** * The Private Key */ public BigInteger s; /** * Generate a random private key with ECDomainParameters dp */ public ECPrivKey(ECDomainParameters dp) { this.dp = (ECDomainParameters) dp.clone(); SecureRandom rnd = new SecureRandom(); s = new BigInteger(dp.m, rnd); s = s.mod(dp.r); } /** * Generate a private key with ECDomainParameters dp * and private key s */ public ECPrivKey(ECDomainParameters dp, BigInteger s) { this.dp = dp; this.s = s; } public String toString() { String str = new String("dp: ").concat(dp.toString()).concat("\n"); str = str.concat("s: ").concat(s.toString()).concat("\n"); return str; } protected Object clone() { return new ECPrivKey(dp, s); } }<|fim▁end|>
/** * The EC Domain Parameters */ public ECDomainParameters dp;
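
The random-key constructor above draws an m-bit integer and reduces it modulo the group order r. Below is a minimal TypeScript sketch of that same step using Node's crypto module, for illustration only — this is not the borZoi API, and a production implementation should reject s == 0 and prefer rejection sampling, since plain reduction mod r is slightly biased.

import { randomBytes } from 'crypto';

// Mirror `new BigInteger(dp.m, rnd)` followed by `s.mod(dp.r)`:
// draw roughly m random bits, then reduce modulo the group order r.
function randomScalarModR(m: number, r: bigint): bigint {
  const buf = randomBytes(Math.ceil(m / 8));             // at least m random bits
  const s = BigInt('0x' + (buf.toString('hex') || '0'));
  return s % r;                                          // like the Java code, this can yield 0n
}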
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ import { ServiceClientOptions, RequestOptions, ServiceCallback, HttpOperationResponse } from 'ms-rest'; import * as models from '../models'; /** * @class * DataSources * __NOTE__: An instance of this class is automatically created for an * instance of the SearchServiceClient. */ export interface DataSources { /** * Creates a new Azure Search datasource or updates a datasource if it already * exists. * * @param {string} dataSourceName The name of the datasource to create or * update. * * @param {object} dataSource The definition of the datasource to create or * update. * * @param {string} dataSource.name The name of the datasource. * * @param {string} [dataSource.description] The description of the datasource. * * @param {object} dataSource.type The type of the datasource. * * @param {string} [dataSource.type.name] * * @param {object} dataSource.credentials Credentials for the datasource. * * @param {string} dataSource.credentials.connectionString The connection * string for the datasource. * * @param {object} dataSource.container The data container for the datasource. * * @param {string} dataSource.container.name The name of the table or view (for * Azure SQL data source) or collection (for DocumentDB data source) that will * be indexed. * * @param {string} [dataSource.container.query] A query that is applied to this * data container. The syntax and meaning of this parameter is * datasource-specific. Not supported by Azure SQL datasources. * * @param {object} [dataSource.dataChangeDetectionPolicy] The data change * detection policy for the datasource. * * @param {string} dataSource.dataChangeDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {object} [dataSource.dataDeletionDetectionPolicy] The data deletion * detection policy for the datasource. * * @param {string} dataSource.dataDeletionDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {string} [dataSource.eTag] The ETag of the DataSource. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<DataSource>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ createOrUpdateWithHttpOperationResponse(dataSourceName: string, dataSource: models.DataSource, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? 
: { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.DataSource>>; /** * Creates a new Azure Search datasource or updates a datasource if it already * exists. * * @param {string} dataSourceName The name of the datasource to create or * update. * * @param {object} dataSource The definition of the datasource to create or * update. * * @param {string} dataSource.name The name of the datasource. * * @param {string} [dataSource.description] The description of the datasource. * * @param {object} dataSource.type The type of the datasource. * * @param {string} [dataSource.type.name] * * @param {object} dataSource.credentials Credentials for the datasource. * * @param {string} dataSource.credentials.connectionString The connection * string for the datasource. * * @param {object} dataSource.container The data container for the datasource. * * @param {string} dataSource.container.name The name of the table or view (for * Azure SQL data source) or collection (for DocumentDB data source) that will * be indexed. * * @param {string} [dataSource.container.query] A query that is applied to this * data container. The syntax and meaning of this parameter is * datasource-specific. Not supported by Azure SQL datasources. * * @param {object} [dataSource.dataChangeDetectionPolicy] The data change * detection policy for the datasource. * * @param {string} dataSource.dataChangeDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {object} [dataSource.dataDeletionDetectionPolicy] The data deletion * detection policy for the datasource. * * @param {string} dataSource.dataDeletionDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {string} [dataSource.eTag] The ETag of the DataSource. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {DataSource} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {DataSource} [result] - The deserialized result object if an error did not occur. * See {@link DataSource} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ createOrUpdate(dataSourceName: string, dataSource: models.DataSource, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? 
: models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<models.DataSource>; createOrUpdate(dataSourceName: string, dataSource: models.DataSource, callback: ServiceCallback<models.DataSource>): void; createOrUpdate(dataSourceName: string, dataSource: models.DataSource, options: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.DataSource>): void; /** * Deletes an Azure Search datasource. * * @param {string} dataSourceName The name of the datasource to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ deleteMethodWithHttpOperationResponse(dataSourceName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<void>>; /** * Deletes an Azure Search datasource. * * @param {string} dataSourceName The name of the datasource to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. 
* * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ deleteMethod(dataSourceName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<void>; deleteMethod(dataSourceName: string, callback: ServiceCallback<void>): void; deleteMethod(dataSourceName: string, options: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<void>): void; /** * Retrieves a datasource definition from Azure Search. * * @param {string} dataSourceName The name of the datasource to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<DataSource>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ getWithHttpOperationResponse(dataSourceName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.DataSource>>; /** * Retrieves a datasource definition from Azure Search. * * @param {string} dataSourceName The name of the datasource to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {DataSource} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {DataSource} [result] - The deserialized result object if an error did not occur. * See {@link DataSource} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ get(dataSourceName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.DataSource>; get(dataSourceName: string, callback: ServiceCallback<models.DataSource>): void; get(dataSourceName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.DataSource>): void; /** * Lists all datasources available for an Azure Search service. * * @param {object} [options] Optional Parameters. 
* * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<DataSourceListResult>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ listWithHttpOperationResponse(options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.DataSourceListResult>>; /** * Lists all datasources available for an Azure Search service. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {DataSourceListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {DataSourceListResult} [result] - The deserialized result object if an error did not occur. * See {@link DataSourceListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ list(options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.DataSourceListResult>; list(callback: ServiceCallback<models.DataSourceListResult>): void; list(options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.DataSourceListResult>): void; /** * Creates a new Azure Search datasource. * * @param {object} dataSource The definition of the datasource to create. * * @param {string} dataSource.name The name of the datasource. * * @param {string} [dataSource.description] The description of the datasource. * * @param {object} dataSource.type The type of the datasource. * * @param {string} [dataSource.type.name] * * @param {object} dataSource.credentials Credentials for the datasource. * * @param {string} dataSource.credentials.connectionString The connection * string for the datasource. * * @param {object} dataSource.container The data container for the datasource. * * @param {string} dataSource.container.name The name of the table or view (for * Azure SQL data source) or collection (for DocumentDB data source) that will * be indexed. * * @param {string} [dataSource.container.query] A query that is applied to this * data container. The syntax and meaning of this parameter is * datasource-specific. Not supported by Azure SQL datasources. * * @param {object} [dataSource.dataChangeDetectionPolicy] The data change * detection policy for the datasource. 
* * @param {string} dataSource.dataChangeDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {object} [dataSource.dataDeletionDetectionPolicy] The data deletion * detection policy for the datasource. * * @param {string} dataSource.dataDeletionDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {string} [dataSource.eTag] The ETag of the DataSource. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<DataSource>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ createWithHttpOperationResponse(dataSource: models.DataSource, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.DataSource>>; /** * Creates a new Azure Search datasource. * * @param {object} dataSource The definition of the datasource to create. * * @param {string} dataSource.name The name of the datasource. * * @param {string} [dataSource.description] The description of the datasource. * * @param {object} dataSource.type The type of the datasource. * * @param {string} [dataSource.type.name] * * @param {object} dataSource.credentials Credentials for the datasource. * * @param {string} dataSource.credentials.connectionString The connection * string for the datasource. * * @param {object} dataSource.container The data container for the datasource. * * @param {string} dataSource.container.name The name of the table or view (for * Azure SQL data source) or collection (for DocumentDB data source) that will * be indexed. * * @param {string} [dataSource.container.query] A query that is applied to this * data container. The syntax and meaning of this parameter is * datasource-specific. Not supported by Azure SQL datasources. * * @param {object} [dataSource.dataChangeDetectionPolicy] The data change * detection policy for the datasource. * * @param {string} dataSource.dataChangeDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {object} [dataSource.dataDeletionDetectionPolicy] The data deletion * detection policy for the datasource. * * @param {string} dataSource.dataDeletionDetectionPolicy.odatatype Polymorphic * Discriminator * * @param {string} [dataSource.eTag] The ETag of the DataSource. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {DataSource} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. 
* * {DataSource} [result] - The deserialized result object if an error did not occur. * See {@link DataSource} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ create(dataSource: models.DataSource, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.DataSource>; create(dataSource: models.DataSource, callback: ServiceCallback<models.DataSource>): void; create(dataSource: models.DataSource, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.DataSource>): void; } /** * @class * Indexers * __NOTE__: An instance of this class is automatically created for an * instance of the SearchServiceClient. */ export interface Indexers { /** * Resets the change tracking state associated with an Azure Search indexer. * * @param {string} indexerName The name of the indexer to reset. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ resetWithHttpOperationResponse(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<void>>; /** * Resets the change tracking state associated with an Azure Search indexer. * * @param {string} indexerName The name of the indexer to reset. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ reset(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<void>; reset(indexerName: string, callback: ServiceCallback<void>): void; reset(indexerName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback<void>): void; /** * Runs an Azure Search indexer on-demand. * * @param {string} indexerName The name of the indexer to run. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ runWithHttpOperationResponse(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<void>>; /** * Runs an Azure Search indexer on-demand. * * @param {string} indexerName The name of the indexer to run. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ run(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<void>; run(indexerName: string, callback: ServiceCallback<void>): void; run(indexerName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<void>): void; /** * Creates a new Azure Search indexer or updates an indexer if it already * exists. * * @param {string} indexerName The name of the indexer to create or update. * * @param {object} indexer The definition of the indexer to create or update. * * @param {string} indexer.name The name of the indexer. * * @param {string} [indexer.description] The description of the indexer. * * @param {string} indexer.dataSourceName The name of the datasource from which * this indexer reads data. * * @param {string} indexer.targetIndexName The name of the index to which this * indexer writes data. * * @param {object} [indexer.schedule] The schedule for this indexer. * * @param {moment.duration} indexer.schedule.interval The interval of time * between indexer executions. * * @param {date} [indexer.schedule.startTime] The time when an indexer should * start running. * * @param {object} [indexer.parameters] Parameters for indexer execution. 
* * @param {number} [indexer.parameters.batchSize] The number of items that are * read from the data source and indexed as a single batch in order to improve * performance. The default depends on the data source type. * * @param {number} [indexer.parameters.maxFailedItems] The maximum number of * items that can fail indexing for indexer execution to still be considered * successful. -1 means no limit. Default is 0. * * @param {number} [indexer.parameters.maxFailedItemsPerBatch] The maximum * number of items in a single batch that can fail indexing for the batch to * still be considered successful. -1 means no limit. Default is 0. * * @param {boolean} [indexer.parameters.base64EncodeKeys] Whether the indexer will * base64-encode all values that are inserted into the key field of the target * index. This is needed if keys can contain characters that are invalid in * document keys (such as dot '.'). Default is false. * * @param {object} [indexer.parameters.configuration] A dictionary of * indexer-specific configuration properties. Each name is the name of a * specific property. Each value must be of a primitive type. * * @param {array} [indexer.fieldMappings] Defines mappings between fields in * the data source and corresponding target fields in the index. * * @param {boolean} [indexer.isDisabled] A value indicating whether the indexer * is disabled. Default is false. * * @param {string} [indexer.eTag] The ETag of the Indexer. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Indexer>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ createOrUpdateWithHttpOperationResponse(indexerName: string, indexer: models.Indexer, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Indexer>>; /** * Creates a new Azure Search indexer or updates an indexer if it already * exists. * * @param {string} indexerName The name of the indexer to create or update. * * @param {object} indexer The definition of the indexer to create or update. * * @param {string} indexer.name The name of the indexer. * * @param {string} [indexer.description] The description of the indexer. * * @param {string} indexer.dataSourceName The name of the datasource from which * this indexer reads data. * * @param {string} indexer.targetIndexName The name of the index to which this * indexer writes data. * * @param {object} [indexer.schedule] The schedule for this indexer. * * @param {moment.duration} indexer.schedule.interval The interval of time * between indexer executions.
* * @param {date} [indexer.schedule.startTime] The time when an indexer should * start running. * * @param {object} [indexer.parameters] Parameters for indexer execution. * * @param {number} [indexer.parameters.batchSize] The number of items that are * read from the data source and indexed as a single batch in order to improve * performance. The default depends on the data source type. * * @param {number} [indexer.parameters.maxFailedItems] The maximum number of * items that can fail indexing for indexer execution to still be considered * successful. -1 means no limit. Default is 0. * * @param {number} [indexer.parameters.maxFailedItemsPerBatch] The maximum * number of items in a single batch that can fail indexing for the batch to * still be considered successful. -1 means no limit. Default is 0. * * @param {boolean} [indexer.parameters.base64EncodeKeys] Whether the indexer will * base64-encode all values that are inserted into the key field of the target * index. This is needed if keys can contain characters that are invalid in * document keys (such as dot '.'). Default is false. * * @param {object} [indexer.parameters.configuration] A dictionary of * indexer-specific configuration properties. Each name is the name of a * specific property. Each value must be of a primitive type. * * @param {array} [indexer.fieldMappings] Defines mappings between fields in * the data source and corresponding target fields in the index. * * @param {boolean} [indexer.isDisabled] A value indicating whether the indexer * is disabled. Default is false. * * @param {string} [indexer.eTag] The ETag of the Indexer. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Indexer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Indexer} [result] - The deserialized result object if an error did not occur. * See {@link Indexer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ createOrUpdate(indexerName: string, indexer: models.Indexer, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders?
: { [headerName: string]: string; } }): Promise<models.Indexer>; createOrUpdate(indexerName: string, indexer: models.Indexer, callback: ServiceCallback<models.Indexer>): void; createOrUpdate(indexerName: string, indexer: models.Indexer, options: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.Indexer>): void; /** * Deletes an Azure Search indexer. * * @param {string} indexerName The name of the indexer to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ deleteMethodWithHttpOperationResponse(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<void>>; /** * Deletes an Azure Search indexer. * * @param {string} indexerName The name of the indexer to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. 
*/ deleteMethod(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<void>; deleteMethod(indexerName: string, callback: ServiceCallback<void>): void; deleteMethod(indexerName: string, options: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<void>): void; /** * Retrieves an indexer definition from Azure Search. * * @param {string} indexerName The name of the indexer to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Indexer>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ getWithHttpOperationResponse(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Indexer>>; /** * Retrieves an indexer definition from Azure Search. * * @param {string} indexerName The name of the indexer to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Indexer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Indexer} [result] - The deserialized result object if an error did not occur. * See {@link Indexer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ get(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.Indexer>; get(indexerName: string, callback: ServiceCallback<models.Indexer>): void; get(indexerName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.Indexer>): void; /** * Lists all indexers available for an Azure Search service. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. 
* * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<IndexerListResult>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ listWithHttpOperationResponse(options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.IndexerListResult>>; /** * Lists all indexers available for an Azure Search service. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {IndexerListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {IndexerListResult} [result] - The deserialized result object if an error did not occur. * See {@link IndexerListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ list(options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.IndexerListResult>; list(callback: ServiceCallback<models.IndexerListResult>): void; list(options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.IndexerListResult>): void; /** * Creates a new Azure Search indexer. * * @param {object} indexer The definition of the indexer to create. * * @param {string} indexer.name The name of the indexer. * * @param {string} [indexer.description] The description of the indexer. * * @param {string} indexer.dataSourceName The name of the datasource from which * this indexer reads data. * * @param {string} indexer.targetIndexName The name of the index to which this * indexer writes data. * * @param {object} [indexer.schedule] The schedule for this indexer. * * @param {moment.duration} indexer.schedule.interval The interval of time * between indexer executions. * * @param {date} [indexer.schedule.startTime] The time when an indexer should * start running. * * @param {object} [indexer.parameters] Parameters for indexer execution. * * @param {number} [indexer.parameters.batchSize] The number of items that are * read from the data source and indexed as a single batch in order to improve * performance. The default depends on the data source type. * * @param {number} [indexer.parameters.maxFailedItems] The maximum number of * items that can fail indexing for indexer execution to still be considered * successful. -1 means no limit. Default is 0. 
* * @param {number} [indexer.parameters.maxFailedItemsPerBatch] The maximum * number of items in a single batch that can fail indexing for the batch to * still be considered successful. -1 means no limit. Default is 0. * * @param {boolean} [indexer.parameters.base64EncodeKeys] Whether the indexer will * base64-encode all values that are inserted into the key field of the target * index. This is needed if keys can contain characters that are invalid in * document keys (such as dot '.'). Default is false. * * @param {object} [indexer.parameters.configuration] A dictionary of * indexer-specific configuration properties. Each name is the name of a * specific property. Each value must be of a primitive type. * * @param {array} [indexer.fieldMappings] Defines mappings between fields in * the data source and corresponding target fields in the index. * * @param {boolean} [indexer.isDisabled] A value indicating whether the indexer * is disabled. Default is false. * * @param {string} [indexer.eTag] The ETag of the Indexer. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Indexer>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ createWithHttpOperationResponse(indexer: models.Indexer, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Indexer>>; /** * Creates a new Azure Search indexer. * * @param {object} indexer The definition of the indexer to create. * * @param {string} indexer.name The name of the indexer. * * @param {string} [indexer.description] The description of the indexer. * * @param {string} indexer.dataSourceName The name of the datasource from which * this indexer reads data. * * @param {string} indexer.targetIndexName The name of the index to which this * indexer writes data. * * @param {object} [indexer.schedule] The schedule for this indexer. * * @param {moment.duration} indexer.schedule.interval The interval of time * between indexer executions. * * @param {date} [indexer.schedule.startTime] The time when an indexer should * start running. * * @param {object} [indexer.parameters] Parameters for indexer execution. * * @param {number} [indexer.parameters.batchSize] The number of items that are * read from the data source and indexed as a single batch in order to improve * performance. The default depends on the data source type. * * @param {number} [indexer.parameters.maxFailedItems] The maximum number of * items that can fail indexing for indexer execution to still be considered * successful. -1 means no limit. Default is 0. * * @param {number} [indexer.parameters.maxFailedItemsPerBatch] The maximum * number of items in a single batch that can fail indexing for the batch to * still be considered successful. -1 means no limit. Default is 0. * * @param {boolean} [indexer.parameters.base64EncodeKeys] Whether the indexer will * base64-encode all values that are inserted into the key field of the target * index. This is needed if keys can contain characters that are invalid in * document keys (such as dot '.'). Default is false.
* * @param {object} [indexer.parameters.configuration] A dictionary of * indexer-specific configuration properties. Each name is the name of a * specific property. Each value must be of a primitive type. * * @param {array} [indexer.fieldMappings] Defines mappings between fields in * the data source and corresponding target fields in the index. * * @param {boolean} [indexer.isDisabled] A value indicating whether the indexer * is disabled. Default is false. * * @param {string} [indexer.eTag] The ETag of the Indexer. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Indexer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Indexer} [result] - The deserialized result object if an error did not occur. * See {@link Indexer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ create(indexer: models.Indexer, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.Indexer>; create(indexer: models.Indexer, callback: ServiceCallback<models.Indexer>): void; create(indexer: models.Indexer, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.Indexer>): void; /** * Returns the current status and execution history of an indexer. * * @param {string} indexerName The name of the indexer for which to retrieve * status. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<IndexerExecutionInfo>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ getStatusWithHttpOperationResponse(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.IndexerExecutionInfo>>; /** * Returns the current status and execution history of an indexer. * * @param {string} indexerName The name of the indexer for which to retrieve * status. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. 
* * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {IndexerExecutionInfo} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {IndexerExecutionInfo} [result] - The deserialized result object if an error did not occur. * See {@link IndexerExecutionInfo} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ getStatus(indexerName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.IndexerExecutionInfo>; getStatus(indexerName: string, callback: ServiceCallback<models.IndexerExecutionInfo>): void; getStatus(indexerName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.IndexerExecutionInfo>): void; } /** * @class * Indexes * __NOTE__: An instance of this class is automatically created for an * instance of the SearchServiceClient. */ export interface Indexes { /**<|fim▁hole|> * * @param {string} index.name The name of the index. * * @param {array} index.fields The fields of the index. * * @param {array} [index.scoringProfiles] The scoring profiles for the index. * * @param {string} [index.defaultScoringProfile] The name of the scoring * profile to use if none is specified in the query. If this property is not * set and no scoring profile is specified in the query, then default scoring * (tf-idf) will be used. * * @param {object} [index.corsOptions] Options to control Cross-Origin Resource * Sharing (CORS) for the index. * * @param {array} index.corsOptions.allowedOrigins The list of origins from * which JavaScript code will be granted access to your index. Can contain a * list of hosts of the form * {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to * allow all origins (not recommended). * * @param {number} [index.corsOptions.maxAgeInSeconds] The duration for which * browsers should cache CORS preflight responses. Defaults to 5 minutes. * * @param {array} [index.suggesters] The suggesters for the index. * * @param {array} [index.analyzers] The analyzers for the index. * * @param {array} [index.tokenizers] The tokenizers for the index. * * @param {array} [index.tokenFilters] The token filters for the index. * * @param {array} [index.charFilters] The character filters for the index. * * @param {string} [index.eTag] The ETag of the index. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Index>} - The deserialized result object.
* * @reject {Error|ServiceError} - The error object. */ createWithHttpOperationResponse(index: models.Index, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Index>>; /** * Creates a new Azure Search index. * * @param {object} index The definition of the index to create. * * @param {string} index.name The name of the index. * * @param {array} index.fields The fields of the index. * * @param {array} [index.scoringProfiles] The scoring profiles for the index. * * @param {string} [index.defaultScoringProfile] The name of the scoring * profile to use if none is specified in the query. If this property is not * set and no scoring profile is specified in the query, then default scoring * (tf-idf) will be used. * * @param {object} [index.corsOptions] Options to control Cross-Origin Resource * Sharing (CORS) for the index. * * @param {array} index.corsOptions.allowedOrigins The list of origins from * which JavaScript code will be granted access to your index. Can contain a * list of hosts of the form * {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to * allow all origins (not recommended). * * @param {number} [index.corsOptions.maxAgeInSeconds] The duration for which * browsers should cache CORS preflight responses. Defaults to 5 minutes. * * @param {array} [index.suggesters] The suggesters for the index. * * @param {array} [index.analyzers] The analyzers for the index. * * @param {array} [index.tokenizers] The tokenizers for the index. * * @param {array} [index.tokenFilters] The token filters for the index. * * @param {array} [index.charFilters] The character filters for the index. * * @param {string} [index.eTag] The ETag of the index. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Index} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Index} [result] - The deserialized result object if an error did not occur. * See {@link Index} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ create(index: models.Index, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.Index>; create(index: models.Index, callback: ServiceCallback<models.Index>): void; create(index: models.Index, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.Index>): void; /** * Lists all indexes available for an Azure Search service. * * @param {object} [options] Optional Parameters.
* * @param {string} [options.select] Selects which properties of the index * definitions to retrieve. Specified as a comma-separated list of JSON * property names, or '*' for all properties. The default is all properties. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<IndexListResult>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ listWithHttpOperationResponse(options?: { select? : string, searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.IndexListResult>>; /** * Lists all indexes available for an Azure Search service. * * @param {object} [options] Optional Parameters. * * @param {string} [options.select] Selects which properties of the index * definitions to retrieve. Specified as a comma-separated list of JSON * property names, or '*' for all properties. The default is all properties. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {IndexListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {IndexListResult} [result] - The deserialized result object if an error did not occur. * See {@link IndexListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ list(options?: { select? : string, searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.IndexListResult>; list(callback: ServiceCallback<models.IndexListResult>): void; list(options: { select? : string, searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.IndexListResult>): void; /** * Creates a new Azure Search index or updates an index if it already exists. * * @param {string} indexName The name of the index to create or update. * * @param {object} index The definition of the index to create or update. * * @param {string} index.name The name of the index. * * @param {array} index.fields The fields of the index. * * @param {array} [index.scoringProfiles] The scoring profiles for the index. * * @param {string} [index.defaultScoringProfile] The name of the scoring * profile to use if none is specified in the query. If this property is not * set and no scoring profile is specified in the query, then default scoring * (tf-idf) will be used.
* * @param {object} [index.corsOptions] Options to control Cross-Origin Resource * Sharing (CORS) for the index. * * @param {array} index.corsOptions.allowedOrigins The list of origins from * which JavaScript code will be granted access to your index. Can contain a * list of hosts of the form * {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to * allow all origins (not recommended). * * @param {number} [index.corsOptions.maxAgeInSeconds] The duration for which * browsers should cache CORS preflight responses. Defaults to 5 minutes. * * @param {array} [index.suggesters] The suggesters for the index. * * @param {array} [index.analyzers] The analyzers for the index. * * @param {array} [index.tokenizers] The tokenizers for the index. * * @param {array} [index.tokenFilters] The token filters for the index. * * @param {array} [index.charFilters] The character filters for the index. * * @param {string} [index.eTag] The ETag of the index. * * @param {object} [options] Optional Parameters. * * @param {boolean} [options.allowIndexDowntime] Allows new analyzers, * tokenizers, token filters, or char filters to be added to an index by taking * the index offline for at least a few seconds. This temporarily causes * indexing and query requests to fail. Performance and write availability of * the index can be impaired for several minutes after the index is updated, or * longer for very large indexes. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Index>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ createOrUpdateWithHttpOperationResponse(indexName: string, index: models.Index, options?: { allowIndexDowntime? : boolean, searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Index>>; /** * Creates a new Azure Search index or updates an index if it already exists. * * @param {string} indexName The name of the index to create or update. * * @param {object} index The definition of the index to create or update. * * @param {string} index.name The name of the index. * * @param {array} index.fields The fields of the index. * * @param {array} [index.scoringProfiles] The scoring profiles for the index. * * @param {string} [index.defaultScoringProfile] The name of the scoring * profile to use if none is specified in the query. If this property is not * set and no scoring profile is specified in the query, then default scoring * (tf-idf) will be used. * * @param {object} [index.corsOptions] Options to control Cross-Origin Resource * Sharing (CORS) for the index.
* * @param {array} index.corsOptions.allowedOrigins The list of origins from * which JavaScript code will be granted access to your index. Can contain a * list of hosts of the form * {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to * allow all origins (not recommended). * * @param {number} [index.corsOptions.maxAgeInSeconds] The duration for which * browsers should cache CORS preflight responses. Defaults to 5 minutes. * * @param {array} [index.suggesters] The suggesters for the index. * * @param {array} [index.analyzers] The analyzers for the index. * * @param {array} [index.tokenizers] The tokenizers for the index. * * @param {array} [index.tokenFilters] The token filters for the index. * * @param {array} [index.charFilters] The character filters for the index. * * @param {string} [index.eTag] The ETag of the index. * * @param {object} [options] Optional Parameters. * * @param {boolean} [options.allowIndexDowntime] Allows new analyzers, * tokenizers, token filters, or char filters to be added to an index by taking * the index offline for at least a few seconds. This temporarily causes * indexing and query requests to fail. Performance and write availability of * the index can be impaired for several minutes after the index is updated, or * longer for very large indexes. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Index} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Index} [result] - The deserialized result object if an error did not occur. * See {@link Index} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ createOrUpdate(indexName: string, index: models.Index, options?: { allowIndexDowntime? : boolean, searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<models.Index>; createOrUpdate(indexName: string, index: models.Index, callback: ServiceCallback<models.Index>): void; createOrUpdate(indexName: string, index: models.Index, options: { allowIndexDowntime? : boolean, searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders?
: { [headerName: string]: string; } }, callback: ServiceCallback<models.Index>): void; /** * Deletes an Azure Search index and all the documents it contains. * * @param {string} indexName The name of the index to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<null>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ deleteMethodWithHttpOperationResponse(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<void>>; /** * Deletes an Azure Search index and all the documents it contains. * * @param {string} indexName The name of the index to delete. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.accessCondition] Additional parameters for the * operation * * @param {string} [options.accessCondition.ifMatch] Defines the If-Match * condition. The operation will be performed only if the ETag on the server * matches this value. * * @param {string} [options.accessCondition.ifNoneMatch] Defines the * If-None-Match condition. The operation will be performed only if the ETag on * the server does not match this value. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ deleteMethod(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }): Promise<void>; deleteMethod(indexName: string, callback: ServiceCallback<void>): void; deleteMethod(indexName: string, options: { searchRequestOptions? 
: models.SearchRequestOptions, accessCondition? : models.AccessCondition, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<void>): void; /** * Retrieves an index definition from Azure Search. * * @param {string} indexName The name of the index to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<Index>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ getWithHttpOperationResponse(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.Index>>; /** * Retrieves an index definition from Azure Search. * * @param {string} indexName The name of the index to retrieve. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {Index} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {Index} [result] - The deserialized result object if an error did not occur. * See {@link Index} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ get(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.Index>; get(indexName: string, callback: ServiceCallback<models.Index>): void; get(indexName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.Index>): void; /** * Returns statistics for the given index, including a document count and * storage usage. * * @param {string} indexName The name of the index for which to retrieve * statistics. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<IndexGetStatisticsResult>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
*/ getStatisticsWithHttpOperationResponse(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.IndexGetStatisticsResult>>; /** * Returns statistics for the given index, including a document count and * storage usage. * * @param {string} indexName The name of the index for which to retrieve * statistics. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {IndexGetStatisticsResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {IndexGetStatisticsResult} [result] - The deserialized result object if an error did not occur. * See {@link IndexGetStatisticsResult} for more * information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ getStatistics(indexName: string, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<models.IndexGetStatisticsResult>; getStatistics(indexName: string, callback: ServiceCallback<models.IndexGetStatisticsResult>): void; getStatistics(indexName: string, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.IndexGetStatisticsResult>): void; /** * Shows how an analyzer breaks text into tokens. * * @param {string} indexName The name of the index for which to test an * analyzer. * * @param {object} request The text and analyzer or analysis components to * test. * * @param {string} request.text The text to break into tokens. * * @param {object} [request.analyzer] The name of the analyzer to use to break * the given text. If this parameter is not specified, you must specify a * tokenizer instead. The tokenizer and analyzer parameters are mutually * exclusive. * * @param {string} [request.analyzer.name] * * @param {object} [request.tokenizer] The name of the tokenizer to use to * break the given text. If this parameter is not specified, you must specify * an analyzer instead. The tokenizer and analyzer parameters are mutually * exclusive. * * @param {string} [request.tokenizer.name] * * @param {array} [request.tokenFilters] An optional list of token filters to * use when breaking the given text. This parameter can only be set when using * the tokenizer parameter. * * @param {array} [request.charFilters] An optional list of character filters * to use when breaking the given text. This parameter can only be set when * using the tokenizer parameter. * * @param {object} [options] Optional Parameters. 
* * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * * @resolve {HttpOperationResponse<AnalyzeResult>} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ analyzeWithHttpOperationResponse(indexName: string, request: models.AnalyzeRequest, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }): Promise<HttpOperationResponse<models.AnalyzeResult>>; /** * Shows how an analyzer breaks text into tokens. * * @param {string} indexName The name of the index for which to test an * analyzer. * * @param {object} request The text and analyzer or analysis components to * test. * * @param {string} request.text The text to break into tokens. * * @param {object} [request.analyzer] The name of the analyzer to use to break * the given text. If this parameter is not specified, you must specify a * tokenizer instead. The tokenizer and analyzer parameters are mutually * exclusive. * * @param {string} [request.analyzer.name] * * @param {object} [request.tokenizer] The name of the tokenizer to use to * break the given text. If this parameter is not specified, you must specify * an analyzer instead. The tokenizer and analyzer parameters are mutually * exclusive. * * @param {string} [request.tokenizer.name] * * @param {array} [request.tokenFilters] An optional list of token filters to * use when breaking the given text. This parameter can only be set when using * the tokenizer parameter. * * @param {array} [request.charFilters] An optional list of character filters * to use when breaking the given text. This parameter can only be set when * using the tokenizer parameter. * * @param {object} [options] Optional Parameters. * * @param {object} [options.searchRequestOptions] Additional parameters for the * operation * * @param {uuid} [options.searchRequestOptions.clientRequestId] The tracking ID * sent with the request to help with debugging. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @param {ServiceCallback} [optionalCallback] - The optional callback. * * @returns {ServiceCallback|Promise} If a callback was passed as the last * parameter then it returns the callback else returns a Promise. * * {Promise} A promise is returned. * * @resolve {AnalyzeResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * * {ServiceCallback} optionalCallback(err, result, request, response) * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * * {AnalyzeResult} [result] - The deserialized result object if an error did not occur. * See {@link AnalyzeResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ analyze(indexName: string, request: models.AnalyzeRequest, options?: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? 
: { [headerName: string]: string; } }): Promise<models.AnalyzeResult>; analyze(indexName: string, request: models.AnalyzeRequest, callback: ServiceCallback<models.AnalyzeResult>): void; analyze(indexName: string, request: models.AnalyzeRequest, options: { searchRequestOptions? : models.SearchRequestOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback<models.AnalyzeResult>): void; }<|fim▁end|>
* Creates a new Azure Search index. * * @param {object} index The definition of the index to create.
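The declarations above expose each operation both as a Promise and as a callback overload. As a quick orientation, here is a minimal TypeScript usage sketch; the import path and the `indexers`/`indexes` property names on the client are assumptions for illustration, and only the method signatures are taken from the interfaces above.

import { SearchServiceClient } from 'azure-search'; // hypothetical module path

async function refreshHotelsIndex(client: SearchServiceClient): Promise<void> {
  // Create the indexer only if it does not already exist (If-None-Match: *).
  await client.indexers.createOrUpdate(
    'hotels-indexer',
    {
      name: 'hotels-indexer',
      dataSourceName: 'hotels-datasource', // illustrative resource names
      targetIndexName: 'hotels',
    },
    { accessCondition: { ifNoneMatch: '*' } }
  );

  // Reset change tracking, run the indexer on demand, then check its history.
  await client.indexers.reset('hotels-indexer');
  await client.indexers.run('hotels-indexer');
  const status = await client.indexers.getStatus('hotels-indexer');
  console.log(status); // models.IndexerExecutionInfo

  // Ask the target index how an analyzer would tokenize some sample text.
  const tokens = await client.indexes.analyze('hotels', {
    text: 'Cozy downtown hotel',
    analyzer: { name: 'en.lucene' }, // analyzer name is an assumption
  });
  console.log(tokens); // models.AnalyzeResult
}

The same calls accept a trailing ServiceCallback instead of returning a Promise, and the *WithHttpOperationResponse variants return the raw HttpOperationResponse when status codes or headers are needed.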
<|file_name|>windows_env_start.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2020 The Pigweed Authors # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """Prints the env_setup banner for cmd.exe. This is done from Python as activating colors and printing ASCII art are not easy to do in cmd.exe. Activated colors also don't persist in the parent process. """ from __future__ import print_function import argparse import os<|fim▁hole|> from .colors import Color, enable_colors # type: ignore _PIGWEED_BANNER = u''' ▒█████▄ █▓ ▄███▒ ▒█ ▒█ ░▓████▒ ░▓████▒ ▒▓████▄ ▒█░ █░ ░█▒ ██▒ ▀█▒ ▒█░ █ ▒█ ▒█ ▀ ▒█ ▀ ▒█ ▀█▌ ▒█▄▄▄█░ ░█▒ █▓░ ▄▄░ ▒█░ █ ▒█ ▒███ ▒███ ░█ █▌ ▒█▀ ░█░ ▓█ █▓ ░█░ █ ▒█ ▒█ ▄ ▒█ ▄ ░█ ▄█▌ ▒█ ░█░ ░▓███▀ ▒█▓▀▓█░ ░▓████▒ ░▓████▒ ▒▓████▀ ''' def print_banner(bootstrap, no_shell_file): """Print the Pigweed or project-specific banner""" enable_colors() print(Color.green('\n WELCOME TO...')) print(Color.magenta(_PIGWEED_BANNER)) if bootstrap: print( Color.green('\n BOOTSTRAP! Bootstrap may take a few minutes; ' 'please be patient')) print( Color.green( ' On Windows, this stage is extremely slow (~10 minutes).\n')) else: print( Color.green( '\n ACTIVATOR! This sets your console environment variables.\n' )) if no_shell_file: print(Color.bold_red('Error!\n')) print( Color.red(' Your Pigweed environment does not seem to be' ' configured.')) print(Color.red(' Run bootstrap.bat to perform initial setup.')) return 0 def parse(): """Parse command-line arguments.""" parser = argparse.ArgumentParser() parser.add_argument('--bootstrap', action='store_true') parser.add_argument('--no-shell-file', action='store_true') return parser.parse_args() def main(): """Script entry point.""" if os.name != 'nt': return 1 return print_banner(**vars(parse())) if __name__ == '__main__': sys.exit(main())<|fim▁end|>
import sys
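To make the flag handling in the Pigweed script above concrete, a short usage sketch follows; the pw_env_setup.windows_env_start import path is an assumption about the package layout, but the flag-to-behavior mapping follows the parse() and print_banner() code shown above.

# Usage sketch for the banner module above. The import path is an
# assumed package location; adjust it to wherever the file lives.
import sys
from pw_env_setup.windows_env_start import print_banner

# Equivalent of invoking the script with --bootstrap on Windows:
# prints the colored Pigweed banner plus the "be patient" notice.
print_banner(bootstrap=True, no_shell_file=False)

# Equivalent of --no-shell-file: the banner is followed by the red
# "run bootstrap.bat" error text. print_banner() returns 0 in both
# cases, matching how main() uses its result as the exit status.
sys.exit(print_banner(bootstrap=False, no_shell_file=True))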
<|file_name|>Environment.py<|end_file_name|><|fim▁begin|>"""SCons.Environment Base class for construction Environments. These are the primary objects used to communicate dependency and construction information to the build engine. Keyword arguments supplied when the construction Environment is created are construction variables used to initialize the Environment """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. __revision__ = "src/engine/SCons/Environment.py 2014/03/02 14:18:15 garyo" import copy import os import sys import re import shlex from collections import UserDict import SCons.Action import SCons.Builder import SCons.Debug from SCons.Debug import logInstanceCreation import SCons.Defaults import SCons.Errors import SCons.Memoize import SCons.Node import SCons.Node.Alias import SCons.Node.FS import SCons.Node.Python import SCons.Platform import SCons.SConf import SCons.SConsign import SCons.Subst import SCons.Tool import SCons.Util import SCons.Warnings class _Null(object): pass _null = _Null _warn_copy_deprecated = True _warn_source_signatures_deprecated = True _warn_target_signatures_deprecated = True CleanTargets = {} CalculatorArgs = {} semi_deepcopy = SCons.Util.semi_deepcopy semi_deepcopy_dict = SCons.Util.semi_deepcopy_dict # Pull UserError into the global name space for the benefit of # Environment().SourceSignatures(), which has some import statements # which seem to mess up its ability to reference SCons directly. UserError = SCons.Errors.UserError def alias_builder(env, target, source): pass AliasBuilder = SCons.Builder.Builder(action = alias_builder, target_factory = SCons.Node.Alias.default_ans.Alias, source_factory = SCons.Node.FS.Entry, multi = 1, is_explicit = None, name='AliasBuilder') def apply_tools(env, tools, toolpath): # Store the toolpath in the Environment. if toolpath is not None: env['toolpath'] = toolpath if not tools: return # Filter out null tools from the list. for tool in [_f for _f in tools if _f]: if SCons.Util.is_List(tool) or isinstance(tool, tuple): toolname = tool[0] toolargs = tool[1] # should be a dict of kw args tool = env.Tool(toolname, **toolargs) else: env.Tool(tool) # These names are (or will be) controlled by SCons; users should never # set or override them. This warning can optionally be turned off, # but scons will still ignore the illegal variable names even if it's off. 
reserved_construction_var_names = [ 'CHANGED_SOURCES', 'CHANGED_TARGETS', 'SOURCE', 'SOURCES', 'TARGET', 'TARGETS', 'UNCHANGED_SOURCES', 'UNCHANGED_TARGETS', ] future_reserved_construction_var_names = [ #'HOST_OS', #'HOST_ARCH', #'HOST_CPU', ] def copy_non_reserved_keywords(dict): result = semi_deepcopy(dict) for k in result.keys(): if k in reserved_construction_var_names: msg = "Ignoring attempt to set reserved variable `$%s'" SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % k) del result[k] return result def _set_reserved(env, key, value): msg = "Ignoring attempt to set reserved variable `$%s'" SCons.Warnings.warn(SCons.Warnings.ReservedVariableWarning, msg % key) def _set_future_reserved(env, key, value): env._dict[key] = value msg = "`$%s' will be reserved in a future release and setting it will become ignored" SCons.Warnings.warn(SCons.Warnings.FutureReservedVariableWarning, msg % key) def _set_BUILDERS(env, key, value): try: bd = env._dict[key] for k in bd.keys(): del bd[k] except KeyError: bd = BuilderDict(kwbd, env) env._dict[key] = bd for k, v in value.items(): if not SCons.Builder.is_a_Builder(v): raise SCons.Errors.UserError('%s is not a Builder.' % repr(v)) bd.update(value) def _del_SCANNERS(env, key): del env._dict[key] env.scanner_map_delete() def _set_SCANNERS(env, key, value): env._dict[key] = value env.scanner_map_delete() def _delete_duplicates(l, keep_last): """Delete duplicates from a sequence, keeping the first or last.""" seen={} result=[] if keep_last: # reverse in & out, then keep first l.reverse() for i in l: try: if i not in seen: result.append(i) seen[i]=1 except TypeError: # probably unhashable. Just keep it. result.append(i) if keep_last: result.reverse() return result # The following is partly based on code in a comment added by Peter # Shannon at the following page (there called the "transplant" class): # # ASPN : Python Cookbook : Dynamically added methods to a class # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732 # # We had independently been using the idiom as BuilderWrapper, but # factoring out the common parts into this base class, and making # BuilderWrapper a subclass that overrides __call__() to enforce specific # Builder calling conventions, simplified some of our higher-layer code. class MethodWrapper(object): """ A generic Wrapper class that associates a method (which can actually be any callable) with an object. As part of creating this MethodWrapper object an attribute with the specified (by default, the name of the supplied method) is added to the underlying object. When that new "method" is called, our __call__() method adds the object as the first argument, simulating the Python behavior of supplying "self" on method calls. We hang on to the name by which the method was added to the underlying base class so that we can provide a method to "clone" ourselves onto a new underlying object being copied (without which we wouldn't need to save that info). """ def __init__(self, object, method, name=None): if name is None: name = method.__name__ self.object = object self.method = method self.name = name setattr(self.object, name, self) def __call__(self, *args, **kwargs): nargs = (self.object,) + args return self.method(*nargs, **kwargs) def clone(self, new_object): """ Returns an object that re-binds the underlying "method" to the specified new object. 
""" return self.__class__(new_object, self.method, self.name) class BuilderWrapper(MethodWrapper): """ A MethodWrapper subclass that that associates an environment with a Builder. This mainly exists to wrap the __call__() function so that all calls to Builders can have their argument lists massaged in the same way (treat a lone argument as the source, treat two arguments as target then source, make sure both target and source are lists) without having to have cut-and-paste code to do it. As a bit of obsessive backwards compatibility, we also intercept attempts to get or set the "env" or "builder" attributes, which were the names we used before we put the common functionality into the MethodWrapper base class. We'll keep this around for a while in case people shipped Tool modules that reached into the wrapper (like the Tool/qt.py module does, or did). There shouldn't be a lot attribute fetching or setting on these, so a little extra work shouldn't hurt. """ def __call__(self, target=None, source=_null, *args, **kw): if source is _null: source = target target = None if target is not None and not SCons.Util.is_List(target): target = [target] if source is not None and not SCons.Util.is_List(source): source = [source] return MethodWrapper.__call__(self, target, source, *args, **kw) def __repr__(self): return '<BuilderWrapper %s>' % repr(self.name) def __str__(self): return self.__repr__() def __getattr__(self, name): if name == 'env': return self.object elif name == 'builder': return self.method else: raise AttributeError(name) def __setattr__(self, name, value): if name == 'env': self.object = value elif name == 'builder': self.method = value else: self.__dict__[name] = value # This allows a Builder to be executed directly # through the Environment to which it's attached. # In practice, we shouldn't need this, because # builders actually get executed through a Node. # But we do have a unit test for this, and can't # yet rule out that it would be useful in the # future, so leave it for now. #def execute(self, **kw): # kw['env'] = self.env # self.builder.execute(**kw) class BuilderDict(UserDict): """This is a dictionary-like class used by an Environment to hold the Builders. We need to do this because every time someone changes the Builders in the Environment's BUILDERS dictionary, we must update the Environment's attributes.""" def __init__(self, dict, env): # Set self.env before calling the superclass initialization, # because it will end up calling our other methods, which will # need to point the values in this dictionary to self.env. self.env = env UserDict.__init__(self, dict) def __semi_deepcopy__(self): # These cannot be copied since they would both modify the same builder object, and indeed # just copying would modify the original builder raise TypeError( 'cannot semi_deepcopy a BuilderDict' ) def __setitem__(self, item, val): try: method = getattr(self.env, item).method except AttributeError: pass else: self.env.RemoveMethod(method) UserDict.__setitem__(self, item, val) BuilderWrapper(self.env, val, item) def __delitem__(self, item): UserDict.__delitem__(self, item) delattr(self.env, item) def update(self, dict): for i, v in dict.items(): self.__setitem__(i, v) _is_valid_var = re.compile(r'[_a-zA-Z]\w*$') def is_valid_construction_var(varstr): """Return if the specified string is a legitimate construction variable. """ return _is_valid_var.match(varstr) class SubstitutionEnvironment(object): """Base class for different flavors of construction environments. 
This class contains a minimal set of methods that handle contruction variable expansion and conversion of strings to Nodes, which may or may not be actually useful as a stand-alone class. Which methods ended up in this class is pretty arbitrary right now. They're basically the ones which we've empirically determined are common to the different construction environment subclasses, and most of the others that use or touch the underlying dictionary of construction variables. Eventually, this class should contain all the methods that we determine are necessary for a "minimal" interface to the build engine. A full "native Python" SCons environment has gotten pretty heavyweight with all of the methods and Tools and construction variables we've jammed in there, so it would be nice to have a lighter weight alternative for interfaces that don't need all of the bells and whistles. (At some point, we'll also probably rename this class "Base," since that more reflects what we want this class to become, but because we've released comments that tell people to subclass Environment.Base to create their own flavors of construction environment, we'll save that for a future refactoring when this class actually becomes useful.) """ if SCons.Memoize.use_memoizer: __metaclass__ = SCons.Memoize.Memoized_Metaclass def __init__(self, **kw): """Initialization of an underlying SubstitutionEnvironment class. """ if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.SubstitutionEnvironment') self.fs = SCons.Node.FS.get_default_fs() self.ans = SCons.Node.Alias.default_ans self.lookup_list = SCons.Node.arg2nodes_lookups self._dict = kw.copy() self._init_special() self.added_methods = [] #self._memo = {} def _init_special(self): """Initial the dispatch tables for special handling of special construction variables.""" self._special_del = {} self._special_del['SCANNERS'] = _del_SCANNERS self._special_set = {} for key in reserved_construction_var_names: self._special_set[key] = _set_reserved for key in future_reserved_construction_var_names: self._special_set[key] = _set_future_reserved self._special_set['BUILDERS'] = _set_BUILDERS self._special_set['SCANNERS'] = _set_SCANNERS # Freeze the keys of self._special_set in a list for use by # methods that need to check. (Empirically, list scanning has # gotten better than dict.has_key() in Python 2.5.) self._special_set_keys = list(self._special_set.keys()) def __cmp__(self, other): return cmp(self._dict, other._dict) def __delitem__(self, key): special = self._special_del.get(key) if special: special(self, key) else: del self._dict[key] def __getitem__(self, key): return self._dict[key] def __setitem__(self, key, value): # This is heavily used. This implementation is the best we have # according to the timings in bench/env.__setitem__.py. # # The "key in self._special_set_keys" test here seems to perform # pretty well for the number of keys we have. A hard-coded # list works a little better in Python 2.5, but that has the # disadvantage of maybe getting out of sync if we ever add more # variable names. Using self._special_set.has_key() works a # little better in Python 2.4, but is worse than this test. # So right now it seems like a good trade-off, but feel free to # revisit this with bench/env.__setitem__.py as needed (and # as newer versions of Python come out). if key in self._special_set_keys: self._special_set[key](self, key, value) else: # If we already have the entry, then it's obviously a valid # key and we don't need to check. 
If we do check, using a # global, pre-compiled regular expression directly is more # efficient than calling another function or a method. if key not in self._dict \ and not _is_valid_var.match(key): raise SCons.Errors.UserError("Illegal construction variable `%s'" % key) self._dict[key] = value def get(self, key, default=None): """Emulates the get() method of dictionaries.""" return self._dict.get(key, default) def has_key(self, key): return key in self._dict def __contains__(self, key): return self._dict.__contains__(key) def items(self): return list(self._dict.items()) def arg2nodes(self, args, node_factory=_null, lookup_list=_null, **kw): if node_factory is _null: node_factory = self.fs.File if lookup_list is _null: lookup_list = self.lookup_list if not args: return [] args = SCons.Util.flatten(args) nodes = [] for v in args: if SCons.Util.is_String(v): n = None for l in lookup_list: n = l(v) if n is not None: break if n is not None: if SCons.Util.is_String(n): # n = self.subst(n, raw=1, **kw) kw['raw'] = 1 n = self.subst(n, **kw) if node_factory: n = node_factory(n) if SCons.Util.is_List(n): nodes.extend(n) else: nodes.append(n) elif node_factory: # v = node_factory(self.subst(v, raw=1, **kw)) kw['raw'] = 1 v = node_factory(self.subst(v, **kw)) if SCons.Util.is_List(v): nodes.extend(v) else: nodes.append(v) else: nodes.append(v) return nodes def gvars(self): return self._dict def lvars(self): return {} def subst(self, string, raw=0, target=None, source=None, conv=None, executor=None): """Recursively interpolates construction variables from the Environment into the specified string, returning the expanded result. Construction variables are specified by a $ prefix in the string and begin with an initial underscore or alphabetic character followed by any number of underscores or alphanumeric characters. The construction variable names may be surrounded by curly braces to separate the name from trailing characters. """ gvars = self.gvars() lvars = self.lvars() lvars['__env__'] = self if executor: lvars.update(executor.get_lvars()) return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv) def subst_kw(self, kw, raw=0, target=None, source=None): nkw = {} for k, v in kw.items(): k = self.subst(k, raw, target, source) if SCons.Util.is_String(v): v = self.subst(v, raw, target, source) nkw[k] = v return nkw def subst_list(self, string, raw=0, target=None, source=None, conv=None, executor=None): """Calls through to SCons.Subst.scons_subst_list(). See the documentation for that function.""" gvars = self.gvars() lvars = self.lvars() lvars['__env__'] = self if executor: lvars.update(executor.get_lvars()) return SCons.Subst.scons_subst_list(string, self, raw, target, source, gvars, lvars, conv) def subst_path(self, path, target=None, source=None): """Substitute a path list, turning EntryProxies into Nodes and leaving Nodes (and other objects) as-is.""" if not SCons.Util.is_List(path): path = [path] def s(obj): """This is the "string conversion" routine that we have our substitutions use to return Nodes, not strings. 
This relies on the fact that an EntryProxy object has a get() method that returns the underlying Node that it wraps, which is a bit of architectural dependence that we might need to break or modify in the future in response to additional requirements.""" try: get = obj.get except AttributeError: obj = SCons.Util.to_String_for_subst(obj) else: obj = get() return obj r = [] for p in path: if SCons.Util.is_String(p): p = self.subst(p, target=target, source=source, conv=s) if SCons.Util.is_List(p): if len(p) == 1: p = p[0] else: # We have an object plus a string, or multiple # objects that we need to smush together. No choice # but to make them into a string. p = ''.join(map(SCons.Util.to_String_for_subst, p)) else: p = s(p) r.append(p) return r subst_target_source = subst def backtick(self, command): import subprocess # common arguments kw = { 'stdin' : 'devnull', 'stdout' : subprocess.PIPE, 'stderr' : subprocess.PIPE, 'universal_newlines' : True, } # if the command is a list, assume it's been quoted # othewise force a shell if not SCons.Util.is_List(command): kw['shell'] = True # run constructed command p = SCons.Action._subproc(self, command, **kw) out,err = p.communicate() status = p.wait() if err: sys.stderr.write(unicode(err)) if status: raise OSError("'%s' exited %d" % (command, status)) return out def AddMethod(self, function, name=None): """ Adds the specified function as a method of this construction environment with the specified name. If the name is omitted, the default name is the name of the function itself. """ method = MethodWrapper(self, function, name) self.added_methods.append(method) def RemoveMethod(self, function): """ Removes the specified function's MethodWrapper from the added_methods list, so we don't re-bind it when making a clone. """ self.added_methods = [dm for dm in self.added_methods if not dm.method is function] def Override(self, overrides): """ Produce a modified environment whose variables are overriden by the overrides dictionaries. "overrides" is a dictionary that will override the variables of this environment. This function is much more efficient than Clone() or creating a new Environment because it doesn't copy the construction environment dictionary, it just wraps the underlying construction environment, and doesn't even create a wrapper object if there are no overrides. """ if not overrides: return self o = copy_non_reserved_keywords(overrides) if not o: return self overrides = {} merges = None for key, value in o.items(): if key == 'parse_flags': merges = value else: overrides[key] = SCons.Subst.scons_subst_once(value, self, key) env = OverrideEnvironment(self, overrides) if merges: env.MergeFlags(merges) return env def ParseFlags(self, *flags): """ Parse the set of flags and return a dict with the flags placed in the appropriate entry. The flags are treated as a typical set of command-line flags for a GNU-like toolchain and used to populate the entries in the dict immediately below. If one of the flag strings begins with a bang (exclamation mark), it is assumed to be a command and the rest of the string is executed; the result of that evaluation is then added to the dict. 
""" dict = { 'ASFLAGS' : SCons.Util.CLVar(''), 'CFLAGS' : SCons.Util.CLVar(''), 'CCFLAGS' : SCons.Util.CLVar(''), 'CXXFLAGS' : SCons.Util.CLVar(''), 'CPPDEFINES' : [], 'CPPFLAGS' : SCons.Util.CLVar(''), 'CPPPATH' : [], 'FRAMEWORKPATH' : SCons.Util.CLVar(''), 'FRAMEWORKS' : SCons.Util.CLVar(''), 'LIBPATH' : [], 'LIBS' : [], 'LINKFLAGS' : SCons.Util.CLVar(''), 'RPATH' : [], } def do_parse(arg): # if arg is a sequence, recurse with each element if not arg: return if not SCons.Util.is_String(arg): for t in arg: do_parse(t) return # if arg is a command, execute it if arg[0] == '!': arg = self.backtick(arg[1:]) # utility function to deal with -D option def append_define(name, dict = dict): t = name.split('=') if len(t) == 1: dict['CPPDEFINES'].append(name) else: dict['CPPDEFINES'].append([t[0], '='.join(t[1:])]) # Loop through the flags and add them to the appropriate option. # This tries to strike a balance between checking for all possible # flags and keeping the logic to a finite size, so it doesn't # check for some that don't occur often. It particular, if the # flag is not known to occur in a config script and there's a way # of passing the flag to the right place (by wrapping it in a -W # flag, for example) we don't check for it. Note that most # preprocessor options are not handled, since unhandled options # are placed in CCFLAGS, so unless the preprocessor is invoked # separately, these flags will still get to the preprocessor. # Other options not currently handled: # -iqoutedir (preprocessor search path) # -u symbol (linker undefined symbol) # -s (linker strip files) # -static* (linker static binding) # -shared* (linker dynamic binding) # -symbolic (linker global binding) # -R dir (deprecated linker rpath) # IBM compilers may also accept -qframeworkdir=foo params = shlex.split(arg) append_next_arg_to = None # for multi-word args for arg in params: if append_next_arg_to: if append_next_arg_to == 'CPPDEFINES': append_define(arg) elif append_next_arg_to == '-include': t = ('-include', self.fs.File(arg)) dict['CCFLAGS'].append(t) elif append_next_arg_to == '-isysroot': t = ('-isysroot', arg) dict['CCFLAGS'].append(t) dict['LINKFLAGS'].append(t) elif append_next_arg_to == '-arch': t = ('-arch', arg) dict['CCFLAGS'].append(t) dict['LINKFLAGS'].append(t) else: dict[append_next_arg_to].append(arg) append_next_arg_to = None elif not arg[0] in ['-', '+']: dict['LIBS'].append(self.fs.File(arg)) elif arg == '-dylib_file': dict['LINKFLAGS'].append(arg) append_next_arg_to = 'LINKFLAGS' elif arg[:2] == '-L': if arg[2:]: dict['LIBPATH'].append(arg[2:]) else: append_next_arg_to = 'LIBPATH' elif arg[:2] == '-l': if arg[2:]: dict['LIBS'].append(arg[2:]) else: append_next_arg_to = 'LIBS' elif arg[:2] == '-I': if arg[2:]: dict['CPPPATH'].append(arg[2:]) else: append_next_arg_to = 'CPPPATH' elif arg[:4] == '-Wa,': dict['ASFLAGS'].append(arg[4:]) dict['CCFLAGS'].append(arg) elif arg[:4] == '-Wl,': if arg[:11] == '-Wl,-rpath=': dict['RPATH'].append(arg[11:]) elif arg[:7] == '-Wl,-R,': dict['RPATH'].append(arg[7:]) elif arg[:6] == '-Wl,-R': dict['RPATH'].append(arg[6:]) else: dict['LINKFLAGS'].append(arg) elif arg[:4] == '-Wp,': dict['CPPFLAGS'].append(arg) elif arg[:2] == '-D': if arg[2:]: append_define(arg[2:]) else: append_next_arg_to = 'CPPDEFINES' elif arg == '-framework': append_next_arg_to = 'FRAMEWORKS' elif arg[:14] == '-frameworkdir=': dict['FRAMEWORKPATH'].append(arg[14:]) elif arg[:2] == '-F': if arg[2:]: dict['FRAMEWORKPATH'].append(arg[2:]) else: append_next_arg_to = 'FRAMEWORKPATH' elif arg in 
['-mno-cygwin', '-pthread', '-openmp', '-fopenmp']: dict['CCFLAGS'].append(arg) dict['LINKFLAGS'].append(arg) elif arg == '-mwindows': dict['LINKFLAGS'].append(arg) elif arg[:5] == '-std=': if arg[5:].find('++')!=-1: key='CXXFLAGS' else: key='CFLAGS' dict[key].append(arg) elif arg[0] == '+': dict['CCFLAGS'].append(arg) dict['LINKFLAGS'].append(arg) elif arg in ['-include', '-isysroot', '-arch']: append_next_arg_to = arg else: dict['CCFLAGS'].append(arg) for arg in flags: do_parse(arg) return dict def MergeFlags(self, args, unique=1, dict=None): """ Merge the dict in args into the construction variables of this env, or the passed-in dict. If args is not a dict, it is converted into a dict using ParseFlags. If unique is not set, the flags are appended rather than merged. """ if dict is None: dict = self if not SCons.Util.is_Dict(args): args = self.ParseFlags(args) if not unique: self.Append(**args) return self for key, value in args.items(): if not value: continue try: orig = self[key] except KeyError: orig = value else: if not orig: orig = value elif value: # Add orig and value. The logic here was lifted from # part of env.Append() (see there for a lot of comments # about the order in which things are tried) and is # used mainly to handle coercion of strings to CLVar to # "do the right thing" given (e.g.) an original CCFLAGS # string variable like '-pipe -Wall'. try: orig = orig + value except (KeyError, TypeError): try: add_to_orig = orig.append except AttributeError: value.insert(0, orig) orig = value else: add_to_orig(value) t = [] if key[-4:] == 'PATH': ### keep left-most occurence for v in orig: if v not in t: t.append(v) else: ### keep right-most occurence orig.reverse() for v in orig: if v not in t: t.insert(0, v) self[key] = t return self # def MergeShellPaths(self, args, prepend=1): # """ # Merge the dict in args into the shell environment in env['ENV']. # Shell path elements are appended or prepended according to prepend. # Uses Pre/AppendENVPath, so it always appends or prepends uniquely. # Example: env.MergeShellPaths({'LIBPATH': '/usr/local/lib'}) # prepends /usr/local/lib to env['ENV']['LIBPATH']. # """ # for pathname, pathval in args.items(): # if not pathval: # continue # if prepend: # self.PrependENVPath(pathname, pathval) # else: # self.AppendENVPath(pathname, pathval) def default_decide_source(dependency, target, prev_ni): f = SCons.Defaults.DefaultEnvironment().decide_source return f(dependency, target, prev_ni) def default_decide_target(dependency, target, prev_ni): f = SCons.Defaults.DefaultEnvironment().decide_target return f(dependency, target, prev_ni) def default_copy_from_cache(src, dst): f = SCons.Defaults.DefaultEnvironment().copy_from_cache return f(src, dst) class Base(SubstitutionEnvironment): """Base class for "real" construction Environments. These are the primary objects used to communicate dependency and construction information to the build engine. Keyword arguments supplied when the construction Environment is created are construction variables used to initialize the Environment. """ memoizer_counters = [] ####################################################################### # This is THE class for interacting with the SCons build engine, # and it contains a lot of stuff, so we're going to try to keep this # a little organized by grouping the methods. ####################################################################### ####################################################################### # Methods that make an Environment act like a dictionary. 
These have # the expected standard names for Python mapping objects. Note that # we don't actually make an Environment a subclass of UserDict for # performance reasons. Note also that we only supply methods for # dictionary functionality that we actually need and use. ####################################################################### def __init__(self, platform=None, tools=None, toolpath=None, variables=None, parse_flags = None, **kw): """ Initialization of a basic SCons construction environment, including setting up special construction variables like BUILDER, PLATFORM, etc., and searching for and applying available Tools. Note that we do *not* call the underlying base class (SubsitutionEnvironment) initialization, because we need to initialize things in a very specific order that doesn't work with the much simpler base class initialization. """ if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.Base') self._memo = {} self.fs = SCons.Node.FS.get_default_fs() self.ans = SCons.Node.Alias.default_ans self.lookup_list = SCons.Node.arg2nodes_lookups self._dict = semi_deepcopy(SCons.Defaults.ConstructionEnvironment) self._init_special() self.added_methods = [] # We don't use AddMethod, or define these as methods in this # class, because we *don't* want these functions to be bound # methods. They need to operate independently so that the # settings will work properly regardless of whether a given # target ends up being built with a Base environment or an # OverrideEnvironment or what have you. self.decide_target = default_decide_target self.decide_source = default_decide_source self.copy_from_cache = default_copy_from_cache self._dict['BUILDERS'] = BuilderDict(self._dict['BUILDERS'], self) if platform is None: platform = self._dict.get('PLATFORM', None) if platform is None: platform = SCons.Platform.Platform() if SCons.Util.is_String(platform): platform = SCons.Platform.Platform(platform) self._dict['PLATFORM'] = str(platform) platform(self) self._dict['HOST_OS'] = self._dict.get('HOST_OS',None) self._dict['HOST_ARCH'] = self._dict.get('HOST_ARCH',None) # Now set defaults for TARGET_{OS|ARCH} self._dict['TARGET_OS'] = self._dict.get('TARGET_OS',None) self._dict['TARGET_ARCH'] = self._dict.get('TARGET_ARCH',None) # Apply the passed-in and customizable variables to the # environment before calling the tools, because they may use # some of them during initialization. if 'options' in kw: # Backwards compatibility: they may stll be using the # old "options" keyword. variables = kw['options'] del kw['options'] self.Replace(**kw) keys = list(kw.keys()) if variables: keys = keys + list(variables.keys()) variables.Update(self) save = {} for k in keys: try: save[k] = self._dict[k] except KeyError: # No value may have been set if they tried to pass in a # reserved variable name like TARGETS. pass SCons.Tool.Initializers(self) if tools is None: tools = self._dict.get('TOOLS', None) if tools is None: tools = ['default'] apply_tools(self, tools, toolpath) # Now restore the passed-in and customized variables # to the environment, since the values the user set explicitly # should override any values set by the tools. for key, val in save.items(): self._dict[key] = val # Finally, apply any flags to be merged in if parse_flags: self.MergeFlags(parse_flags) ####################################################################### # Utility methods that are primarily for internal use by SCons. # These begin with lower-case letters. 
####################################################################### def get_builder(self, name): """Fetch the builder with the specified name from the environment. """ try: return self._dict['BUILDERS'][name] except KeyError: return None def get_CacheDir(self): try: path = self._CacheDir_path except AttributeError: path = SCons.Defaults.DefaultEnvironment()._CacheDir_path try: if path == self._last_CacheDir_path: return self._last_CacheDir except AttributeError: pass cd = SCons.CacheDir.CacheDir(path) self._last_CacheDir_path = path self._last_CacheDir = cd return cd def get_factory(self, factory, default='File'): """Return a factory function for creating Nodes for this construction environment. """ name = default try: is_node = issubclass(factory, SCons.Node.FS.Base) except TypeError: # The specified factory isn't a Node itself--it's # most likely None, or possibly a callable. pass else: if is_node: # The specified factory is a Node (sub)class. Try to # return the FS method that corresponds to the Node's # name--that is, we return self.fs.Dir if they want a Dir, # self.fs.File for a File, etc. try: name = factory.__name__ except AttributeError: pass else: factory = None if not factory: # They passed us None, or we picked up a name from a specified # class, so return the FS method. (Note that we *don't* # use our own self.{Dir,File} methods because that would # cause env.subst() to be called twice on the file name, # interfering with files that have $$ in them.) factory = getattr(self.fs, name) return factory memoizer_counters.append(SCons.Memoize.CountValue('_gsm')) def _gsm(self): try: return self._memo['_gsm'] except KeyError: pass result = {} try: scanners = self._dict['SCANNERS'] except KeyError: pass else: # Reverse the scanner list so that, if multiple scanners # claim they can scan the same suffix, earlier scanners # in the list will overwrite later scanners, so that # the result looks like a "first match" to the user. if not SCons.Util.is_List(scanners): scanners = [scanners] else: scanners = scanners[:] # copy so reverse() doesn't mod original scanners.reverse() for scanner in scanners: for k in scanner.get_skeys(self): if k and self['PLATFORM'] == 'win32': k = k.lower() result[k] = scanner self._memo['_gsm'] = result return result def get_scanner(self, skey): """Find the appropriate scanner given a key (usually a file suffix). """ if skey and self['PLATFORM'] == 'win32': skey = skey.lower() return self._gsm().get(skey) def scanner_map_delete(self, kw=None): """Delete the cached scanner map (if we need to). """ try: del self._memo['_gsm'] except KeyError: pass def _update(self, dict): """Update an environment's values directly, bypassing the normal checks that occur when users try to set items. """ self._dict.update(dict) def get_src_sig_type(self): try: return self.src_sig_type except AttributeError: t = SCons.Defaults.DefaultEnvironment().src_sig_type self.src_sig_type = t return t def get_tgt_sig_type(self): try: return self.tgt_sig_type except AttributeError: t = SCons.Defaults.DefaultEnvironment().tgt_sig_type self.tgt_sig_type = t return t ####################################################################### # Public methods for manipulating an Environment. These begin with # upper-case letters. The essential characteristic of methods in # this section is that they do *not* have corresponding same-named # global functions. 
For example, a stand-alone Append() function # makes no sense, because Append() is all about appending values to # an Environment's construction variables. ####################################################################### def Append(self, **kw): """Append values to existing construction variables in an Environment. """ kw = copy_non_reserved_keywords(kw) for key, val in kw.items(): # It would be easier on the eyes to write this using # "continue" statements whenever we finish processing an item, # but Python 1.5.2 apparently doesn't let you use "continue" # within try:-except: blocks, so we have to nest our code. try: if key == 'CPPDEFINES' and SCons.Util.is_String(self._dict[key]): self._dict[key] = [self._dict[key]] orig = self._dict[key] except KeyError: # No existing variable in the environment, so just set # it to the new value. if key == 'CPPDEFINES' and SCons.Util.is_String(val): self._dict[key] = [val] else: self._dict[key] = val else: try: # Check if the original looks like a dictionary. # If it is, we can't just try adding the value because # dictionaries don't have __add__() methods, and # things like UserList will incorrectly coerce the # original dict to a list (which we don't want). update_dict = orig.update except AttributeError: try: # Most straightforward: just try to add them # together. This will work in most cases, when the # original and new values are of compatible types. self._dict[key] = orig + val except (KeyError, TypeError): try: # Check if the original is a list. add_to_orig = orig.append except AttributeError: # The original isn't a list, but the new # value is (by process of elimination), # so insert the original in the new value # (if there's one to insert) and replace # the variable with it. if orig: val.insert(0, orig) self._dict[key] = val else: # The original is a list, so append the new # value to it (if there's a value to append). if val: add_to_orig(val) else: # The original looks like a dictionary, so update it # based on what we think the value looks like. if SCons.Util.is_List(val): if key == 'CPPDEFINES': orig = orig.items() orig += val self._dict[key] = orig else: for v in val: orig[v] = None else: try: update_dict(val) except (AttributeError, TypeError, ValueError): if SCons.Util.is_Dict(val): for k, v in val.items(): orig[k] = v else: orig[val] = None self.scanner_map_delete(kw) # allow Dirs and strings beginning with # for top-relative # Note this uses the current env's fs (in self). def _canonicalize(self, path): if not SCons.Util.is_String(path): # typically a Dir path = str(path) if path and path[0] == '#': path = str(self.fs.Dir(path)) return path def AppendENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep, delete_existing=1): """Append path elements to the path 'name' in the 'ENV' dictionary for this environment. Will only add any particular path once, and will normpath and normcase all paths to help assure this. This can also handle the case where the env variable is a list instead of a string. If delete_existing is 0, a newpath which is already in the path will not be moved to the end (it will be left where it is). 
""" orig = '' if envname in self._dict and name in self._dict[envname]: orig = self._dict[envname][name] nv = SCons.Util.AppendPath(orig, newpath, sep, delete_existing, canonicalize=self._canonicalize) if envname not in self._dict: self._dict[envname] = {} self._dict[envname][name] = nv def AppendUnique(self, delete_existing=0, **kw): """Append values to existing construction variables in an Environment, if they're not already there. If delete_existing is 1, removes existing values first, so values move to end. """ kw = copy_non_reserved_keywords(kw) for key, val in kw.items(): if SCons.Util.is_List(val): val = _delete_duplicates(val, delete_existing) if key not in self._dict or self._dict[key] in ('', None): self._dict[key] = val elif SCons.Util.is_Dict(self._dict[key]) and \ SCons.Util.is_Dict(val): self._dict[key].update(val) elif SCons.Util.is_List(val): dk = self._dict[key] if key == 'CPPDEFINES': tmp = [] for i in val: if SCons.Util.is_List(i): if len(i) >= 2: tmp.append((i[0], i[1])) else: tmp.append((i[0],)) elif SCons.Util.is_Tuple(i): tmp.append(i) else: tmp.append((i,)) val = tmp if SCons.Util.is_Dict(dk): dk = dk.items() elif SCons.Util.is_String(dk): dk = [(dk,)] else: tmp = [] for i in dk: if SCons.Util.is_List(i): if len(i) >= 2: tmp.append((i[0], i[1])) else: tmp.append((i[0],)) elif SCons.Util.is_Tuple(i): tmp.append(i) else: tmp.append((i,)) dk = tmp else: if not SCons.Util.is_List(dk): dk = [dk] if delete_existing: dk = [x for x in dk if x not in val] else: val = [x for x in val if x not in dk] self._dict[key] = dk + val else: dk = self._dict[key] if SCons.Util.is_List(dk): if key == 'CPPDEFINES': tmp = [] for i in dk: if SCons.Util.is_List(i): if len(i) >= 2: tmp.append((i[0], i[1])) else: tmp.append((i[0],)) elif SCons.Util.is_Tuple(i): tmp.append(i) else: tmp.append((i,)) dk = tmp if SCons.Util.is_Dict(val): val = val.items() elif SCons.Util.is_String(val): val = [(val,)] if delete_existing: dk = filter(lambda x, val=val: x not in val, dk) self._dict[key] = dk + val else: dk = [x for x in dk if x not in val] self._dict[key] = dk + val else: # By elimination, val is not a list. Since dk is a # list, wrap val in a list first. if delete_existing: dk = filter(lambda x, val=val: x not in val, dk) self._dict[key] = dk + [val] else: if not val in dk: self._dict[key] = dk + [val] else: if key == 'CPPDEFINES': if SCons.Util.is_String(dk): dk = [dk] elif SCons.Util.is_Dict(dk): dk = dk.items() if SCons.Util.is_String(val): if val in dk: val = [] else: val = [val] elif SCons.Util.is_Dict(val): tmp = [] for i,j in val.iteritems(): if j is not None: tmp.append((i,j)) else: tmp.append(i) val = tmp if delete_existing: dk = [x for x in dk if x not in val] self._dict[key] = dk + val self.scanner_map_delete(kw) def Clone(self, tools=[], toolpath=None, parse_flags = None, **kw): """Return a copy of a construction Environment. The copy is like a Python "deep copy"--that is, independent copies are made recursively of each objects--except that a reference is copied when an object is not deep-copyable (like a function). There are no references to any mutable objects in the original Environment. """ try: builders = self._dict['BUILDERS'] except KeyError: pass clone = copy.copy(self) # BUILDERS is not safe to do a simple copy clone._dict = semi_deepcopy_dict(self._dict, ['BUILDERS']) clone._dict['BUILDERS'] = BuilderDict(builders, clone) # Check the methods added via AddMethod() and re-bind them to # the cloned environment. 
Only do this if the attribute hasn't # been overwritten by the user explicitly and still points to # the added method. clone.added_methods = [] for mw in self.added_methods: if mw == getattr(self, mw.name): clone.added_methods.append(mw.clone(clone)) clone._memo = {} # Apply passed-in variables before the tools # so the tools can use the new variables kw = copy_non_reserved_keywords(kw) new = {} for key, value in kw.items(): new[key] = SCons.Subst.scons_subst_once(value, self, key) clone.Replace(**new) apply_tools(clone, tools, toolpath) # apply them again in case the tools overwrote them clone.Replace(**new) # Finally, apply any flags to be merged in if parse_flags: clone.MergeFlags(parse_flags) if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.EnvironmentClone') return clone def Copy(self, *args, **kw): global _warn_copy_deprecated if _warn_copy_deprecated: msg = "The env.Copy() method is deprecated; use the env.Clone() method instead." SCons.Warnings.warn(SCons.Warnings.DeprecatedCopyWarning, msg) _warn_copy_deprecated = False return self.Clone(*args, **kw) def _changed_build(self, dependency, target, prev_ni): if dependency.changed_state(target, prev_ni): return 1 return self.decide_source(dependency, target, prev_ni) def _changed_content(self, dependency, target, prev_ni): return dependency.changed_content(target, prev_ni) def _changed_source(self, dependency, target, prev_ni): target_env = dependency.get_build_env() type = target_env.get_tgt_sig_type() if type == 'source': return target_env.decide_source(dependency, target, prev_ni) else: return target_env.decide_target(dependency, target, prev_ni) def _changed_timestamp_then_content(self, dependency, target, prev_ni): return dependency.changed_timestamp_then_content(target, prev_ni) def _changed_timestamp_newer(self, dependency, target, prev_ni): return dependency.changed_timestamp_newer(target, prev_ni) def _changed_timestamp_match(self, dependency, target, prev_ni): return dependency.changed_timestamp_match(target, prev_ni) def _copy_from_cache(self, src, dst): return self.fs.copy(src, dst) def _copy2_from_cache(self, src, dst): return self.fs.copy2(src, dst) def Decider(self, function): copy_function = self._copy2_from_cache if function in ('MD5', 'content'): if not SCons.Util.md5: raise UserError("MD5 signatures are not available in this version of Python.") function = self._changed_content elif function == 'MD5-timestamp': function = self._changed_timestamp_then_content elif function in ('timestamp-newer', 'make'): function = self._changed_timestamp_newer copy_function = self._copy_from_cache elif function == 'timestamp-match': function = self._changed_timestamp_match elif not callable(function): raise UserError("Unknown Decider value %s" % repr(function)) # We don't use AddMethod because we don't want to turn the # function, which only expects three arguments, into a bound # method, which would add self as an initial, fourth argument. self.decide_target = function self.decide_source = function self.copy_from_cache = copy_function def Detect(self, progs): """Return the first available program in progs. 
""" if not SCons.Util.is_List(progs): progs = [ progs ] for prog in progs: path = self.WhereIs(prog) if path: return prog return None def Dictionary(self, *args): if not args: return self._dict dlist = [self._dict[x] for x in args] if len(dlist) == 1: dlist = dlist[0] return dlist def Dump(self, key = None): """ Using the standard Python pretty printer, dump the contents of the scons build environment to stdout. If the key passed in is anything other than None, then that will be used as an index into the build environment dictionary and whatever is found there will be fed into the pretty printer. Note that this key is case sensitive. """ import pprint pp = pprint.PrettyPrinter(indent=2) if key: dict = self.Dictionary(key) else: dict = self.Dictionary() return pp.pformat(dict) def FindIxes(self, paths, prefix, suffix): """ Search a list of paths for something that matches the prefix and suffix. paths - the list of paths or nodes.<|fim▁hole|> prefix - construction variable for the prefix. suffix - construction variable for the suffix. """ suffix = self.subst('$'+suffix) prefix = self.subst('$'+prefix) for path in paths: dir,name = os.path.split(str(path)) if name[:len(prefix)] == prefix and name[-len(suffix):] == suffix: return path def ParseConfig(self, command, function=None, unique=1): """ Use the specified function to parse the output of the command in order to modify the current environment. The 'command' can be a string or a list of strings representing a command and its arguments. 'Function' is an optional argument that takes the environment, the output of the command, and the unique flag. If no function is specified, MergeFlags, which treats the output as the result of a typical 'X-config' command (i.e. gtk-config), will merge the output into the appropriate variables. """ if function is None: def parse_conf(env, cmd, unique=unique): return env.MergeFlags(cmd, unique) function = parse_conf if SCons.Util.is_List(command): command = ' '.join(command) command = self.subst(command) return function(self, self.backtick(command)) def ParseDepends(self, filename, must_exist=None, only_one=0): """ Parse a mkdep-style file for explicit dependencies. This is completely abusable, and should be unnecessary in the "normal" case of proper SCons configuration, but it may help make the transition from a Make hierarchy easier for some people to swallow. It can also be genuinely useful when using a tool that can write a .d file, but for which writing a scanner would be too complicated. """ filename = self.subst(filename) try: fp = open(filename, 'r') except IOError: if must_exist: raise return lines = SCons.Util.LogicalLines(fp).readlines() lines = [l for l in lines if l[0] != '#'] tdlist = [] for line in lines: try: target, depends = line.split(':', 1) except (AttributeError, ValueError): # Throws AttributeError if line isn't a string. Can throw # ValueError if line doesn't split into two or more elements. pass else: tdlist.append((target.split(), depends.split())) if only_one: targets = [] for td in tdlist: targets.extend(td[0]) if len(targets) > 1: raise SCons.Errors.UserError( "More than one dependency target found in `%s': %s" % (filename, targets)) for target, depends in tdlist: self.Depends(target, depends) def Platform(self, platform): platform = self.subst(platform) return SCons.Platform.Platform(platform)(self) def Prepend(self, **kw): """Prepend values to existing construction variables in an Environment. 
""" kw = copy_non_reserved_keywords(kw) for key, val in kw.items(): # It would be easier on the eyes to write this using # "continue" statements whenever we finish processing an item, # but Python 1.5.2 apparently doesn't let you use "continue" # within try:-except: blocks, so we have to nest our code. try: orig = self._dict[key] except KeyError: # No existing variable in the environment, so just set # it to the new value. self._dict[key] = val else: try: # Check if the original looks like a dictionary. # If it is, we can't just try adding the value because # dictionaries don't have __add__() methods, and # things like UserList will incorrectly coerce the # original dict to a list (which we don't want). update_dict = orig.update except AttributeError: try: # Most straightforward: just try to add them # together. This will work in most cases, when the # original and new values are of compatible types. self._dict[key] = val + orig except (KeyError, TypeError): try: # Check if the added value is a list. add_to_val = val.append except AttributeError: # The added value isn't a list, but the # original is (by process of elimination), # so insert the the new value in the original # (if there's one to insert). if val: orig.insert(0, val) else: # The added value is a list, so append # the original to it (if there's a value # to append). if orig: add_to_val(orig) self._dict[key] = val else: # The original looks like a dictionary, so update it # based on what we think the value looks like. if SCons.Util.is_List(val): for v in val: orig[v] = None else: try: update_dict(val) except (AttributeError, TypeError, ValueError): if SCons.Util.is_Dict(val): for k, v in val.items(): orig[k] = v else: orig[val] = None self.scanner_map_delete(kw) def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep, delete_existing=1): """Prepend path elements to the path 'name' in the 'ENV' dictionary for this environment. Will only add any particular path once, and will normpath and normcase all paths to help assure this. This can also handle the case where the env variable is a list instead of a string. If delete_existing is 0, a newpath which is already in the path will not be moved to the front (it will be left where it is). """ orig = '' if envname in self._dict and name in self._dict[envname]: orig = self._dict[envname][name] nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing, canonicalize=self._canonicalize) if envname not in self._dict: self._dict[envname] = {} self._dict[envname][name] = nv def PrependUnique(self, delete_existing=0, **kw): """Prepend values to existing construction variables in an Environment, if they're not already there. If delete_existing is 1, removes existing values first, so values move to front. """ kw = copy_non_reserved_keywords(kw) for key, val in kw.items(): if SCons.Util.is_List(val): val = _delete_duplicates(val, not delete_existing) if key not in self._dict or self._dict[key] in ('', None): self._dict[key] = val elif SCons.Util.is_Dict(self._dict[key]) and \ SCons.Util.is_Dict(val): self._dict[key].update(val) elif SCons.Util.is_List(val): dk = self._dict[key] if not SCons.Util.is_List(dk): dk = [dk] if delete_existing: dk = [x for x in dk if x not in val] else: val = [x for x in val if x not in dk] self._dict[key] = val + dk else: dk = self._dict[key] if SCons.Util.is_List(dk): # By elimination, val is not a list. Since dk is a # list, wrap val in a list first. 
if delete_existing: dk = [x for x in dk if x not in val] self._dict[key] = [val] + dk else: if not val in dk: self._dict[key] = [val] + dk else: if delete_existing: dk = [x for x in dk if x not in val] self._dict[key] = val + dk self.scanner_map_delete(kw) def Replace(self, **kw): """Replace existing construction variables in an Environment with new construction variables and/or values. """ try: kwbd = kw['BUILDERS'] except KeyError: pass else: kwbd = BuilderDict(kwbd,self) del kw['BUILDERS'] self.__setitem__('BUILDERS', kwbd) kw = copy_non_reserved_keywords(kw) self._update(semi_deepcopy(kw)) self.scanner_map_delete(kw) def ReplaceIxes(self, path, old_prefix, old_suffix, new_prefix, new_suffix): """ Replace old_prefix with new_prefix and old_suffix with new_suffix. env - Environment used to interpolate variables. path - the path that will be modified. old_prefix - construction variable for the old prefix. old_suffix - construction variable for the old suffix. new_prefix - construction variable for the new prefix. new_suffix - construction variable for the new suffix. """ old_prefix = self.subst('$'+old_prefix) old_suffix = self.subst('$'+old_suffix) new_prefix = self.subst('$'+new_prefix) new_suffix = self.subst('$'+new_suffix) dir,name = os.path.split(str(path)) if name[:len(old_prefix)] == old_prefix: name = name[len(old_prefix):] if name[-len(old_suffix):] == old_suffix: name = name[:-len(old_suffix)] return os.path.join(dir, new_prefix+name+new_suffix) def SetDefault(self, **kw): for k in kw.keys(): if k in self._dict: del kw[k] self.Replace(**kw) def _find_toolpath_dir(self, tp): return self.fs.Dir(self.subst(tp)).srcnode().abspath def Tool(self, tool, toolpath=None, **kw): if SCons.Util.is_String(tool): tool = self.subst(tool) if toolpath is None: toolpath = self.get('toolpath', []) toolpath = list(map(self._find_toolpath_dir, toolpath)) tool = SCons.Tool.Tool(tool, toolpath, **kw) tool(self) def WhereIs(self, prog, path=None, pathext=None, reject=[]): """Find prog in the path. """ if path is None: try: path = self['ENV']['PATH'] except KeyError: pass elif SCons.Util.is_String(path): path = self.subst(path) if pathext is None: try: pathext = self['ENV']['PATHEXT'] except KeyError: pass elif SCons.Util.is_String(pathext): pathext = self.subst(pathext) prog = self.subst(prog) path = SCons.Util.WhereIs(prog, path, pathext, reject) if path: return path return None ####################################################################### # Public methods for doing real "SCons stuff" (manipulating # dependencies, setting attributes on targets, etc.). These begin # with upper-case letters. The essential characteristic of methods # in this section is that they all *should* have corresponding # same-named global functions. 
####################################################################### def Action(self, *args, **kw): def subst_string(a, self=self): if SCons.Util.is_String(a): a = self.subst(a) return a nargs = list(map(subst_string, args)) nkw = self.subst_kw(kw) return SCons.Action.Action(*nargs, **nkw) def AddPreAction(self, files, action): nodes = self.arg2nodes(files, self.fs.Entry) action = SCons.Action.Action(action) uniq = {} for executor in [n.get_executor() for n in nodes]: uniq[executor] = 1 for executor in uniq.keys(): executor.add_pre_action(action) return nodes def AddPostAction(self, files, action): nodes = self.arg2nodes(files, self.fs.Entry) action = SCons.Action.Action(action) uniq = {} for executor in [n.get_executor() for n in nodes]: uniq[executor] = 1 for executor in uniq.keys(): executor.add_post_action(action) return nodes def Alias(self, target, source=[], action=None, **kw): tlist = self.arg2nodes(target, self.ans.Alias) if not SCons.Util.is_List(source): source = [source] source = [_f for _f in source if _f] if not action: if not source: # There are no source files and no action, so just # return a target list of classic Alias Nodes, without # any builder. The externally visible effect is that # this will make the wrapping Script.BuildTask class # say that there's "Nothing to be done" for this Alias, # instead of that it's "up to date." return tlist # No action, but there are sources. Re-call all the target # builders to add the sources to each target. result = [] for t in tlist: bld = t.get_builder(AliasBuilder) result.extend(bld(self, t, source)) return result nkw = self.subst_kw(kw) nkw.update({ 'action' : SCons.Action.Action(action), 'source_factory' : self.fs.Entry, 'multi' : 1, 'is_explicit' : None, }) bld = SCons.Builder.Builder(**nkw) # Apply the Builder separately to each target so that the Aliases # stay separate. If we did one "normal" Builder call with the # whole target list, then all of the target Aliases would be # associated under a single Executor. result = [] for t in tlist: # Calling the convert() method will cause a new Executor to be # created from scratch, so we have to explicitly initialize # it with the target's existing sources, plus our new ones, # so nothing gets lost. 
b = t.get_builder() if b is None or b is AliasBuilder: b = bld else: nkw['action'] = b.action + action b = SCons.Builder.Builder(**nkw) t.convert() result.extend(b(self, t, t.sources + source)) return result def AlwaysBuild(self, *targets): tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) for t in tlist: t.set_always_build() return tlist def BuildDir(self, *args, **kw): msg = """BuildDir() and the build_dir keyword have been deprecated;\n\tuse VariantDir() and the variant_dir keyword instead.""" SCons.Warnings.warn(SCons.Warnings.DeprecatedBuildDirWarning, msg) if 'build_dir' in kw: kw['variant_dir'] = kw['build_dir'] del kw['build_dir'] return self.VariantDir(*args, **kw) def Builder(self, **kw): nkw = self.subst_kw(kw) return SCons.Builder.Builder(**nkw) def CacheDir(self, path): import SCons.CacheDir if path is not None: path = self.subst(path) self._CacheDir_path = path def Clean(self, targets, files): global CleanTargets tlist = self.arg2nodes(targets, self.fs.Entry) flist = self.arg2nodes(files, self.fs.Entry) for t in tlist: try: CleanTargets[t].extend(flist) except KeyError: CleanTargets[t] = flist def Configure(self, *args, **kw): nargs = [self] if args: nargs = nargs + self.subst_list(args)[0] nkw = self.subst_kw(kw) nkw['_depth'] = kw.get('_depth', 0) + 1 try: nkw['custom_tests'] = self.subst_kw(nkw['custom_tests']) except KeyError: pass return SCons.SConf.SConf(*nargs, **nkw) def Command(self, target, source, action, **kw): """Builds the supplied target files from the supplied source files using the supplied action. Action may be any type that the Builder constructor will accept for an action.""" bkw = { 'action' : action, 'target_factory' : self.fs.Entry, 'source_factory' : self.fs.Entry, } try: bkw['source_scanner'] = kw['source_scanner'] except KeyError: pass else: del kw['source_scanner'] bld = SCons.Builder.Builder(**bkw) return bld(self, target, source, **kw) def Depends(self, target, dependency): """Explicity specify that 'target's depend on 'dependency'.""" tlist = self.arg2nodes(target, self.fs.Entry) dlist = self.arg2nodes(dependency, self.fs.Entry) for t in tlist: t.add_dependency(dlist) return tlist def Dir(self, name, *args, **kw): """ """ s = self.subst(name) if SCons.Util.is_Sequence(s): result=[] for e in s: result.append(self.fs.Dir(e, *args, **kw)) return result return self.fs.Dir(s, *args, **kw) def NoClean(self, *targets): """Tags a target so that it will not be cleaned by -c""" tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) for t in tlist: t.set_noclean() return tlist def NoCache(self, *targets): """Tags a target so that it will not be cached""" tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) for t in tlist: t.set_nocache() return tlist def Entry(self, name, *args, **kw): """ """ s = self.subst(name) if SCons.Util.is_Sequence(s): result=[] for e in s: result.append(self.fs.Entry(e, *args, **kw)) return result return self.fs.Entry(s, *args, **kw) def Environment(self, **kw): return SCons.Environment.Environment(**self.subst_kw(kw)) def Execute(self, action, *args, **kw): """Directly execute an action through an Environment """ action = self.Action(action, *args, **kw) result = action([], [], self) if isinstance(result, SCons.Errors.BuildError): errstr = result.errstr if result.filename: errstr = result.filename + ': ' + errstr sys.stderr.write("scons: *** %s\n" % errstr) return result.status else: return result def File(self, name, *args, **kw): """ """ s = self.subst(name) 
if SCons.Util.is_Sequence(s): result=[] for e in s: result.append(self.fs.File(e, *args, **kw)) return result return self.fs.File(s, *args, **kw) def FindFile(self, file, dirs): file = self.subst(file) nodes = self.arg2nodes(dirs, self.fs.Dir) return SCons.Node.FS.find_file(file, tuple(nodes)) def Flatten(self, sequence): return SCons.Util.flatten(sequence) def GetBuildPath(self, files): result = list(map(str, self.arg2nodes(files, self.fs.Entry))) if SCons.Util.is_List(files): return result else: return result[0] def Glob(self, pattern, ondisk=True, source=False, strings=False): return self.fs.Glob(self.subst(pattern), ondisk, source, strings) def Ignore(self, target, dependency): """Ignore a dependency.""" tlist = self.arg2nodes(target, self.fs.Entry) dlist = self.arg2nodes(dependency, self.fs.Entry) for t in tlist: t.add_ignore(dlist) return tlist def Literal(self, string): return SCons.Subst.Literal(string) def Local(self, *targets): ret = [] for targ in targets: if isinstance(targ, SCons.Node.Node): targ.set_local() ret.append(targ) else: for t in self.arg2nodes(targ, self.fs.Entry): t.set_local() ret.append(t) return ret def Precious(self, *targets): tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) for t in tlist: t.set_precious() return tlist def Pseudo(self, *targets): tlist = [] for t in targets: tlist.extend(self.arg2nodes(t, self.fs.Entry)) for t in tlist: t.set_pseudo() return tlist def Repository(self, *dirs, **kw): dirs = self.arg2nodes(list(dirs), self.fs.Dir) self.fs.Repository(*dirs, **kw) def Requires(self, target, prerequisite): """Specify that 'prerequisite' must be built before 'target', (but 'target' does not actually depend on 'prerequisite' and need not be rebuilt if it changes).""" tlist = self.arg2nodes(target, self.fs.Entry) plist = self.arg2nodes(prerequisite, self.fs.Entry) for t in tlist: t.add_prerequisite(plist) return tlist def Scanner(self, *args, **kw): nargs = [] for arg in args: if SCons.Util.is_String(arg): arg = self.subst(arg) nargs.append(arg) nkw = self.subst_kw(kw) return SCons.Scanner.Base(*nargs, **nkw) def SConsignFile(self, name=".sconsign", dbm_module=None): if name is not None: name = self.subst(name) if not os.path.isabs(name): name = os.path.join(str(self.fs.SConstruct_dir), name) if name: name = os.path.normpath(name) sconsign_dir = os.path.dirname(name) if sconsign_dir and not os.path.exists(sconsign_dir): self.Execute(SCons.Defaults.Mkdir(sconsign_dir)) SCons.SConsign.File(name, dbm_module) def SideEffect(self, side_effect, target): """Tell scons that side_effects are built as side effects of building targets.""" side_effects = self.arg2nodes(side_effect, self.fs.Entry) targets = self.arg2nodes(target, self.fs.Entry) for side_effect in side_effects: if side_effect.multiple_side_effect_has_builder(): raise SCons.Errors.UserError("Multiple ways to build the same target were specified for: %s" % str(side_effect)) side_effect.add_source(targets) side_effect.side_effect = 1 self.Precious(side_effect) for target in targets: target.side_effects.append(side_effect) return side_effects def SourceCode(self, entry, builder): """Arrange for a source code builder for (part of) a tree.""" msg = """SourceCode() has been deprecated and there is no replacement. 
\tIf you need this function, please contact [email protected].""" SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceCodeWarning, msg) entries = self.arg2nodes(entry, self.fs.Entry) for entry in entries: entry.set_src_builder(builder) return entries def SourceSignatures(self, type): global _warn_source_signatures_deprecated if _warn_source_signatures_deprecated: msg = "The env.SourceSignatures() method is deprecated;\n" + \ "\tconvert your build to use the env.Decider() method instead." SCons.Warnings.warn(SCons.Warnings.DeprecatedSourceSignaturesWarning, msg) _warn_source_signatures_deprecated = False type = self.subst(type) self.src_sig_type = type if type == 'MD5': if not SCons.Util.md5: raise UserError("MD5 signatures are not available in this version of Python.") self.decide_source = self._changed_content elif type == 'timestamp': self.decide_source = self._changed_timestamp_match else: raise UserError("Unknown source signature type '%s'" % type) def Split(self, arg): """This function converts a string or list into a list of strings or Nodes. This makes things easier for users by allowing files to be specified as a white-space separated list to be split. The input rules are: - A single string containing names separated by spaces. These will be split apart at the spaces. - A single Node instance - A list containing either strings or Node instances. Any strings in the list are not split at spaces. In all cases, the function returns a list of Nodes and strings.""" if SCons.Util.is_List(arg): return list(map(self.subst, arg)) elif SCons.Util.is_String(arg): return self.subst(arg).split() else: return [self.subst(arg)] def TargetSignatures(self, type): global _warn_target_signatures_deprecated if _warn_target_signatures_deprecated: msg = "The env.TargetSignatures() method is deprecated;\n" + \ "\tconvert your build to use the env.Decider() method instead." SCons.Warnings.warn(SCons.Warnings.DeprecatedTargetSignaturesWarning, msg) _warn_target_signatures_deprecated = False type = self.subst(type) self.tgt_sig_type = type if type in ('MD5', 'content'): if not SCons.Util.md5: raise UserError("MD5 signatures are not available in this version of Python.") self.decide_target = self._changed_content elif type == 'timestamp': self.decide_target = self._changed_timestamp_match elif type == 'build': self.decide_target = self._changed_build elif type == 'source': self.decide_target = self._changed_source else: raise UserError("Unknown target signature type '%s'"%type) def Value(self, value, built_value=None): """ """ return SCons.Node.Python.Value(value, built_value) def VariantDir(self, variant_dir, src_dir, duplicate=1): variant_dir = self.arg2nodes(variant_dir, self.fs.Dir)[0] src_dir = self.arg2nodes(src_dir, self.fs.Dir)[0] self.fs.VariantDir(variant_dir, src_dir, duplicate) def FindSourceFiles(self, node='.'): """ returns a list of all source files. """ node = self.arg2nodes(node, self.fs.Entry)[0] sources = [] def build_source(ss): for s in ss: if isinstance(s, SCons.Node.FS.Dir): build_source(s.all_children()) elif s.has_builder(): build_source(s.sources) elif isinstance(s.disambiguate(), SCons.Node.FS.File): sources.append(s) build_source(node.all_children()) def final_source(node): while (node != node.srcnode()): node = node.srcnode() return node sources = map( final_source, sources ); # remove duplicates return list(set(sources)) def FindInstalledFiles(self): """ returns the list of all targets of the Install and InstallAs Builder. 
""" from SCons.Tool import install if install._UNIQUE_INSTALLED_FILES is None: install._UNIQUE_INSTALLED_FILES = SCons.Util.uniquer_hashables(install._INSTALLED_FILES) return install._UNIQUE_INSTALLED_FILES class OverrideEnvironment(Base): """A proxy that overrides variables in a wrapped construction environment by returning values from an overrides dictionary in preference to values from the underlying subject environment. This is a lightweight (I hope) proxy that passes through most use of attributes to the underlying Environment.Base class, but has just enough additional methods defined to act like a real construction environment with overridden values. It can wrap either a Base construction environment, or another OverrideEnvironment, which can in turn nest arbitrary OverrideEnvironments... Note that we do *not* call the underlying base class (SubsitutionEnvironment) initialization, because we get most of those from proxying the attributes of the subject construction environment. But because we subclass SubstitutionEnvironment, this class also has inherited arg2nodes() and subst*() methods; those methods can't be proxied because they need *this* object's methods to fetch the values from the overrides dictionary. """ def __init__(self, subject, overrides={}): if SCons.Debug.track_instances: logInstanceCreation(self, 'Environment.OverrideEnvironment') self.__dict__['__subject'] = subject self.__dict__['overrides'] = overrides # Methods that make this class act like a proxy. def __getattr__(self, name): return getattr(self.__dict__['__subject'], name) def __setattr__(self, name, value): setattr(self.__dict__['__subject'], name, value) # Methods that make this class act like a dictionary. def __getitem__(self, key): try: return self.__dict__['overrides'][key] except KeyError: return self.__dict__['__subject'].__getitem__(key) def __setitem__(self, key, value): if not is_valid_construction_var(key): raise SCons.Errors.UserError("Illegal construction variable `%s'" % key) self.__dict__['overrides'][key] = value def __delitem__(self, key): try: del self.__dict__['overrides'][key] except KeyError: deleted = 0 else: deleted = 1 try: result = self.__dict__['__subject'].__delitem__(key) except KeyError: if not deleted: raise result = None return result def get(self, key, default=None): """Emulates the get() method of dictionaries.""" try: return self.__dict__['overrides'][key] except KeyError: return self.__dict__['__subject'].get(key, default) def has_key(self, key): try: self.__dict__['overrides'][key] return 1 except KeyError: return key in self.__dict__['__subject'] def __contains__(self, key): if self.__dict__['overrides'].__contains__(key): return 1 return self.__dict__['__subject'].__contains__(key) def Dictionary(self): """Emulates the items() method of dictionaries.""" d = self.__dict__['__subject'].Dictionary().copy() d.update(self.__dict__['overrides']) return d def items(self): """Emulates the items() method of dictionaries.""" return list(self.Dictionary().items()) # Overridden private construction environment methods. def _update(self, dict): """Update an environment's values directly, bypassing the normal checks that occur when users try to set items. """ self.__dict__['overrides'].update(dict) def gvars(self): return self.__dict__['__subject'].gvars() def lvars(self): lvars = self.__dict__['__subject'].lvars() lvars.update(self.__dict__['overrides']) return lvars # Overridden public construction environment methods. 
def Replace(self, **kw): kw = copy_non_reserved_keywords(kw) self.__dict__['overrides'].update(semi_deepcopy(kw)) # The entry point that will be used by the external world # to refer to a construction environment. This allows the wrapper # interface to extend a construction environment for its own purposes # by subclassing SCons.Environment.Base and then assigning the # class to SCons.Environment.Environment. Environment = Base # An entry point for returning a proxy subclass instance that overrides # the subst*() methods so they don't actually perform construction # variable substitution. This is specifically intended to be the shim # layer in between global function calls (which don't want construction # variable substitution) and the DefaultEnvironment() (which would # substitute variables if left to its own devices).""" # # We have to wrap this in a function that allows us to delay definition of # the class until it's necessary, so that when it subclasses Environment # it will pick up whatever Environment subclass the wrapper interface # might have assigned to SCons.Environment.Environment. def NoSubstitutionProxy(subject): class _NoSubstitutionProxy(Environment): def __init__(self, subject): self.__dict__['__subject'] = subject def __getattr__(self, name): return getattr(self.__dict__['__subject'], name) def __setattr__(self, name, value): return setattr(self.__dict__['__subject'], name, value) def executor_to_lvars(self, kwdict): if kwdict.has_key('executor'): kwdict['lvars'] = kwdict['executor'].get_lvars() del kwdict['executor'] else: kwdict['lvars'] = {} def raw_to_mode(self, dict): try: raw = dict['raw'] except KeyError: pass else: del dict['raw'] dict['mode'] = raw def subst(self, string, *args, **kwargs): return string def subst_kw(self, kw, *args, **kwargs): return kw def subst_list(self, string, *args, **kwargs): nargs = (string, self,) + args nkw = kwargs.copy() nkw['gvars'] = {} self.executor_to_lvars(nkw) self.raw_to_mode(nkw) return SCons.Subst.scons_subst_list(*nargs, **nkw) def subst_target_source(self, string, *args, **kwargs): nargs = (string, self,) + args nkw = kwargs.copy() nkw['gvars'] = {} self.executor_to_lvars(nkw) self.raw_to_mode(nkw) return SCons.Subst.scons_subst(*nargs, **nkw) return _NoSubstitutionProxy(subject) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:<|fim▁end|>
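A minimal SConstruct sketch showing how the environment methods defined in the row above (Command, Depends, AlwaysBuild, NoClean, Alias) are typically called; it assumes a stock SCons install, Environment() resolves to the Base class assigned at the end of this row, and every target and source name is invented for illustration:

# SConstruct (run with `scons`); all paths below are hypothetical.
env = Environment()

# Command(): build a target from a source with an explicit action string.
stamp = env.Command('build/stamp.txt', 'src/main.c', 'echo built > $TARGET')

# Depends(): add a dependency edge that no scanner would discover on its own.
env.Depends(stamp, 'config/settings.ini')

# AlwaysBuild() and NoClean(): rerun on every invocation, never remove with -c.
env.AlwaysBuild(stamp)
env.NoClean(stamp)

# Alias(): expose the target under a friendly command-line name.
env.Alias('stamp', stamp)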
<|file_name|>image.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> //! CSS handling for the specified value of //! [`image`][image]s //! //! [image]: https://drafts.csswg.org/css-images/#image-values use Atom; use cssparser::{Parser, Token}; use custom_properties::SpecifiedValue; use parser::{Parse, ParserContext}; use selectors::parser::SelectorParseErrorKind; #[cfg(feature = "servo")] use servo_url::ServoUrl; use std::cmp::Ordering; use std::f32::consts::PI; use std::fmt; use style_traits::{ToCss, ParseError, StyleParseErrorKind}; use values::{Either, None_}; #[cfg(feature = "gecko")] use values::computed::{Context, Position as ComputedPosition, ToComputedValue}; use values::generics::image::{Circle, CompatMode, Ellipse, ColorStop as GenericColorStop}; use values::generics::image::{EndingShape as GenericEndingShape, Gradient as GenericGradient}; use values::generics::image::{GradientItem as GenericGradientItem, GradientKind as GenericGradientKind}; use values::generics::image::{Image as GenericImage, LineDirection as GenericsLineDirection}; use values::generics::image::{MozImageRect as GenericMozImageRect, ShapeExtent}; use values::generics::image::PaintWorklet; use values::generics::position::Position as GenericPosition; use values::specified::{Angle, Color, Length, LengthOrPercentage}; use values::specified::{Number, NumberOrPercentage, Percentage, RGBAColor}; use values::specified::position::{LegacyPosition, Position, PositionComponent, Side, X, Y}; use values::specified::url::SpecifiedUrl; /// A specified image layer. pub type ImageLayer = Either<None_, Image>; /// Specified values for an image according to CSS-IMAGES. /// https://drafts.csswg.org/css-images/#image-values pub type Image = GenericImage<Gradient, MozImageRect, SpecifiedUrl>; /// Specified values for a CSS gradient. /// https://drafts.csswg.org/css-images/#gradients #[cfg(not(feature = "gecko"))] pub type Gradient = GenericGradient< LineDirection, Length, LengthOrPercentage, Position, RGBAColor, Angle, >; /// Specified values for a CSS gradient. /// https://drafts.csswg.org/css-images/#gradients #[cfg(feature = "gecko")] pub type Gradient = GenericGradient< LineDirection, Length, LengthOrPercentage, GradientPosition, RGBAColor, Angle, >; /// A specified gradient kind. #[cfg(not(feature = "gecko"))] pub type GradientKind = GenericGradientKind< LineDirection, Length, LengthOrPercentage, Position, Angle, >; /// A specified gradient kind. #[cfg(feature = "gecko")] pub type GradientKind = GenericGradientKind< LineDirection, Length, LengthOrPercentage, GradientPosition, Angle, >; /// A specified gradient line direction. #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "gecko", derive(MallocSizeOf))] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum LineDirection { /// An angular direction. Angle(Angle), /// A horizontal direction. Horizontal(X), /// A vertical direction. Vertical(Y), /// A direction towards a corner of a box. Corner(X, Y), /// A Position and an Angle for legacy `-moz-` prefixed gradient. /// `-moz-` prefixed linear gradient can contain both a position and an angle but it /// uses legacy syntax for position. That means we can't specify both keyword and /// length for each horizontal/vertical components. #[cfg(feature = "gecko")] MozPosition(Option<LegacyPosition>, Option<Angle>), } /// A binary enum to hold either Position or LegacyPosition. #[derive(Clone, Debug, MallocSizeOf, PartialEq, ToCss)] #[cfg(feature = "gecko")] pub enum GradientPosition { /// 1, 2, 3, 4-valued <position>. 
Modern(Position), /// 1, 2-valued <position>. Legacy(LegacyPosition), } /// A specified ending shape. pub type EndingShape = GenericEndingShape<Length, LengthOrPercentage>; /// A specified gradient item. pub type GradientItem = GenericGradientItem<RGBAColor, LengthOrPercentage>; /// A computed color stop. pub type ColorStop = GenericColorStop<RGBAColor, LengthOrPercentage>; /// Specified values for `moz-image-rect` /// -moz-image-rect(<uri>, top, right, bottom, left); pub type MozImageRect = GenericMozImageRect<NumberOrPercentage, SpecifiedUrl>; impl Parse for Image { #[cfg_attr(not(feature = "gecko"), allow(unused_mut))] fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Image, ParseError<'i>> { if let Ok(mut url) = input.try(|input| SpecifiedUrl::parse(context, input)) { #[cfg(feature = "gecko")] { url.build_image_value(); } return Ok(GenericImage::Url(url)); } if let Ok(gradient) = input.try(|i| Gradient::parse(context, i)) { return Ok(GenericImage::Gradient(Box::new(gradient))); } #[cfg(feature = "servo")] { if let Ok(paint_worklet) = input.try(|i| PaintWorklet::parse(context, i)) { return Ok(GenericImage::PaintWorklet(paint_worklet)); } } if let Ok(mut image_rect) = input.try(|input| MozImageRect::parse(context, input)) { #[cfg(feature = "gecko")] { image_rect.url.build_image_value(); } return Ok(GenericImage::Rect(Box::new(image_rect))); } Ok(GenericImage::Element(Image::parse_element(input)?)) } } impl Image { /// Creates an already specified image value from an already resolved URL /// for insertion in the cascade. #[cfg(feature = "servo")] pub fn for_cascade(url: ServoUrl) -> Self { GenericImage::Url(SpecifiedUrl::for_cascade(url)) } /// Parses a `-moz-element(# <element-id>)`. fn parse_element<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Atom, ParseError<'i>> { input.try(|i| i.expect_function_matching("-moz-element"))?; let location = input.current_source_location(); input.parse_nested_block(|i| { match *i.next()? { Token::IDHash(ref id) => Ok(Atom::from(id.as_ref())), ref t => Err(location.new_unexpected_token_error(t.clone())), } }) } } impl Parse for Gradient { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { enum Shape { Linear, Radial, } // FIXME: remove clone() when lifetimes are non-lexical let func = input.expect_function()?.clone(); let result = match_ignore_ascii_case! 
{ &func, "linear-gradient" => { Some((Shape::Linear, false, CompatMode::Modern)) }, "-webkit-linear-gradient" => { Some((Shape::Linear, false, CompatMode::WebKit)) }, #[cfg(feature = "gecko")] "-moz-linear-gradient" => { Some((Shape::Linear, false, CompatMode::Moz)) }, "repeating-linear-gradient" => { Some((Shape::Linear, true, CompatMode::Modern)) }, "-webkit-repeating-linear-gradient" => { Some((Shape::Linear, true, CompatMode::WebKit)) }, #[cfg(feature = "gecko")] "-moz-repeating-linear-gradient" => { Some((Shape::Linear, true, CompatMode::Moz)) }, "radial-gradient" => { Some((Shape::Radial, false, CompatMode::Modern)) }, "-webkit-radial-gradient" => { Some((Shape::Radial, false, CompatMode::WebKit)) } #[cfg(feature = "gecko")] "-moz-radial-gradient" => { Some((Shape::Radial, false, CompatMode::Moz)) }, "repeating-radial-gradient" => { Some((Shape::Radial, true, CompatMode::Modern)) }, "-webkit-repeating-radial-gradient" => { Some((Shape::Radial, true, CompatMode::WebKit)) }, #[cfg(feature = "gecko")] "-moz-repeating-radial-gradient" => { Some((Shape::Radial, true, CompatMode::Moz)) }, "-webkit-gradient" => { return input.parse_nested_block(|i| Self::parse_webkit_gradient_argument(context, i)); }, _ => None, }; let (shape, repeating, mut compat_mode) = match result { Some(result) => result, None => return Err(input.new_custom_error(StyleParseErrorKind::UnexpectedFunction(func.clone()))), }; let (kind, items) = input.parse_nested_block(|i| { let shape = match shape { Shape::Linear => GradientKind::parse_linear(context, i, &mut compat_mode)?, Shape::Radial => GradientKind::parse_radial(context, i, &mut compat_mode)?, }; let items = GradientItem::parse_comma_separated(context, i)?; Ok((shape, items)) })?; if items.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(Gradient { items: items, repeating: repeating, kind: kind, compat_mode: compat_mode, }) } } impl Gradient { fn parse_webkit_gradient_argument<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { type Point = GenericPosition<Component<X>, Component<Y>>; #[derive(Clone, Copy)] enum Component<S> { Center, Number(NumberOrPercentage), Side(S), } impl LineDirection { fn from_points(first: Point, second: Point) -> Self { let h_ord = first.horizontal.partial_cmp(&second.horizontal); let v_ord = first.vertical.partial_cmp(&second.vertical); let (h, v) = match (h_ord, v_ord) { (Some(h), Some(v)) => (h, v), _ => return LineDirection::Vertical(Y::Bottom), }; match (h, v) { (Ordering::Less, Ordering::Less) => { LineDirection::Corner(X::Right, Y::Bottom) }, (Ordering::Less, Ordering::Equal) => { LineDirection::Horizontal(X::Right) }, (Ordering::Less, Ordering::Greater) => { LineDirection::Corner(X::Right, Y::Top) }, (Ordering::Equal, Ordering::Greater) => { LineDirection::Vertical(Y::Top) }, (Ordering::Equal, Ordering::Equal) | (Ordering::Equal, Ordering::Less) => { LineDirection::Vertical(Y::Bottom) }, (Ordering::Greater, Ordering::Less) => { LineDirection::Corner(X::Left, Y::Bottom) }, (Ordering::Greater, Ordering::Equal) => { LineDirection::Horizontal(X::Left) }, (Ordering::Greater, Ordering::Greater) => { LineDirection::Corner(X::Left, Y::Top) }, } } } impl From<Point> for Position { fn from(point: Point) -> Self { Self::new(point.horizontal.into(), point.vertical.into()) } } impl Parse for Point { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { input.try(|i| { let x = Component::parse(context, i)?; let 
y = Component::parse(context, i)?; Ok(Self::new(x, y)) }) } } impl<S: Side> From<Component<S>> for NumberOrPercentage { fn from(component: Component<S>) -> Self { match component { Component::Center => NumberOrPercentage::Percentage(Percentage::new(0.5)), Component::Number(number) => number, Component::Side(side) => { let p = if side.is_start() { Percentage::zero() } else { Percentage::hundred() }; NumberOrPercentage::Percentage(p) }, } } } impl<S: Side> From<Component<S>> for PositionComponent<S> { fn from(component: Component<S>) -> Self { match component { Component::Center => { PositionComponent::Center }, Component::Number(NumberOrPercentage::Number(number)) => { PositionComponent::Length(Length::from_px(number.value).into()) }, Component::Number(NumberOrPercentage::Percentage(p)) => { PositionComponent::Length(p.into()) }, Component::Side(side) => { PositionComponent::Side(side, None) }, } } } impl<S: Copy + Side> Component<S> { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { match (NumberOrPercentage::from(*self), NumberOrPercentage::from(*other)) { (NumberOrPercentage::Percentage(a), NumberOrPercentage::Percentage(b)) => { a.get().partial_cmp(&b.get()) }, (NumberOrPercentage::Number(a), NumberOrPercentage::Number(b)) => { a.value.partial_cmp(&b.value) }, (_, _) => { None } } } } impl<S: Parse> Parse for Component<S> { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { if let Ok(side) = input.try(|i| S::parse(context, i)) { return Ok(Component::Side(side)); } if let Ok(number) = input.try(|i| NumberOrPercentage::parse(context, i)) { return Ok(Component::Number(number)); } input.try(|i| i.expect_ident_matching("center"))?; Ok(Component::Center) } } let ident = input.expect_ident_cloned()?; input.expect_comma()?; let (kind, reverse_stops) = match_ignore_ascii_case! { &ident, "linear" => { let first = Point::parse(context, input)?; input.expect_comma()?; let second = Point::parse(context, input)?; let direction = LineDirection::from_points(first, second); let kind = GenericGradientKind::Linear(direction); (kind, false) }, "radial" => { let first_point = Point::parse(context, input)?; input.expect_comma()?; let first_radius = Number::parse(context, input)?; input.expect_comma()?; let second_point = Point::parse(context, input)?; input.expect_comma()?; let second_radius = Number::parse(context, input)?; let (reverse_stops, point, radius) = if second_radius.value >= first_radius.value { (false, second_point, second_radius) } else { (true, first_point, first_radius) }; let shape = GenericEndingShape::Circle(Circle::Radius(Length::from_px(radius.value))); let position: Position = point.into(); #[cfg(feature = "gecko")] { let kind = GenericGradientKind::Radial(shape, GradientPosition::Modern(position), None); (kind, reverse_stops) } #[cfg(not(feature = "gecko"))] { let kind = GenericGradientKind::Radial(shape, position, None); (kind, reverse_stops) } }, _ => return Err(input.new_custom_error(SelectorParseErrorKind::UnexpectedIdent(ident.clone()))), }; let mut items = input.try(|i| { i.expect_comma()?; i.parse_comma_separated(|i| { let function = i.expect_function()?.clone(); let (color, mut p) = i.parse_nested_block(|i| { let p = match_ignore_ascii_case! { &function, "color-stop" => { let p = match NumberOrPercentage::parse(context, i)? 
{ NumberOrPercentage::Number(number) => Percentage::new(number.value), NumberOrPercentage::Percentage(p) => p, }; i.expect_comma()?; p }, "from" => Percentage::zero(), "to" => Percentage::hundred(), _ => return Err(i.new_custom_error(StyleParseErrorKind::UnexpectedFunction(function.clone()))), }; let color = Color::parse(context, i)?; if color == Color::CurrentColor { return Err(i.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok((color.into(), p)) })?; if reverse_stops { p.reverse(); } Ok(GenericGradientItem::ColorStop(GenericColorStop { color: color, position: Some(p.into()), })) }) }).unwrap_or(vec![]); if items.is_empty() { items = vec![ GenericGradientItem::ColorStop(GenericColorStop { color: Color::transparent().into(), position: Some(Percentage::zero().into()), }), GenericGradientItem::ColorStop(GenericColorStop { color: Color::transparent().into(), position: Some(Percentage::hundred().into()), }), ]; } else if items.len() == 1 { let first = items[0].clone(); items.push(first); } else { items.sort_by(|a, b| { match (a, b) { (&GenericGradientItem::ColorStop(ref a), &GenericGradientItem::ColorStop(ref b)) => { match (&a.position, &b.position) { (&Some(LengthOrPercentage::Percentage(a)), &Some(LengthOrPercentage::Percentage(b))) => { return a.0.partial_cmp(&b.0).unwrap_or(Ordering::Equal); }, _ => {}, } }, _ => {}, } if reverse_stops { Ordering::Greater } else { Ordering::Less } }) } Ok(GenericGradient { kind: kind, items: items, repeating: false, compat_mode: CompatMode::Modern, }) } } impl GradientKind { /// Parses a linear gradient. /// CompatMode can change during `-moz-` prefixed gradient parsing if it come across a `to` keyword. fn parse_linear<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>, compat_mode: &mut CompatMode) -> Result<Self, ParseError<'i>> { let direction = if let Ok(d) = input.try(|i| LineDirection::parse(context, i, compat_mode)) { input.expect_comma()?; d } else { match *compat_mode { CompatMode::Modern => LineDirection::Vertical(Y::Bottom), _ => LineDirection::Vertical(Y::Top), } }; Ok(GenericGradientKind::Linear(direction)) } fn parse_radial<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>, compat_mode: &mut CompatMode) -> Result<Self, ParseError<'i>> { let (shape, position, angle, moz_position) = match *compat_mode { CompatMode::Modern => { let shape = input.try(|i| EndingShape::parse(context, i, *compat_mode)); let position = input.try(|i| { i.expect_ident_matching("at")?; Position::parse(context, i) }); (shape, position.ok(), None, None) }, CompatMode::WebKit => { let position = input.try(|i| Position::parse(context, i)); let shape = input.try(|i| { if position.is_ok() { i.expect_comma()?; } EndingShape::parse(context, i, *compat_mode) }); (shape, position.ok(), None, None) }, // The syntax of `-moz-` prefixed radial gradient is: // -moz-radial-gradient( // [ [ <position> || <angle> ]? [ ellipse | [ <length> | <percentage> ]{2} ] , | // [ <position> || <angle> ]? [ [ circle | ellipse ] | <extent-keyword> ] , | // ]? // <color-stop> [ , <color-stop> ]+ // ) // where <extent-keyword> = closest-corner | closest-side | farthest-corner | farthest-side | // cover | contain // and <color-stop> = <color> [ <percentage> | <length> ]? 
CompatMode::Moz => { let mut position = input.try(|i| LegacyPosition::parse(context, i)); let angle = input.try(|i| Angle::parse(context, i)).ok(); if position.is_err() { position = input.try(|i| LegacyPosition::parse(context, i)); } let shape = input.try(|i| { if position.is_ok() || angle.is_some() { i.expect_comma()?; } EndingShape::parse(context, i, *compat_mode) }); (shape, None, angle, position.ok()) } }; if shape.is_ok() || position.is_some() || angle.is_some() || moz_position.is_some() { input.expect_comma()?; } let shape = shape.unwrap_or({ GenericEndingShape::Ellipse(Ellipse::Extent(ShapeExtent::FarthestCorner)) }); #[cfg(feature = "gecko")] { if *compat_mode == CompatMode::Moz { // If this form can be represented in Modern mode, then convert the compat_mode to Modern. if angle.is_none() { *compat_mode = CompatMode::Modern; } let position = moz_position.unwrap_or(LegacyPosition::center()); return Ok(GenericGradientKind::Radial(shape, GradientPosition::Legacy(position), angle)); } } let position = position.unwrap_or(Position::center()); #[cfg(feature = "gecko")] { return Ok(GenericGradientKind::Radial(shape, GradientPosition::Modern(position), angle)); } #[cfg(not(feature = "gecko"))] { return Ok(GenericGradientKind::Radial(shape, position, angle)); } } } impl GenericsLineDirection for LineDirection { fn points_downwards(&self, compat_mode: CompatMode) -> bool { match *self { LineDirection::Angle(ref angle) => angle.radians() == PI, LineDirection::Vertical(Y::Bottom) if compat_mode == CompatMode::Modern => true, LineDirection::Vertical(Y::Top) if compat_mode != CompatMode::Modern => true, #[cfg(feature = "gecko")] LineDirection::MozPosition(Some(LegacyPosition { horizontal: ref x, vertical: ref y, }), None) => { use values::computed::Percentage as ComputedPercentage; use values::specified::transform::OriginComponent; // `50% 0%` is the default value for line direction. // These percentage values can also be keywords. 
let x = match *x { OriginComponent::Center => true, OriginComponent::Length(LengthOrPercentage::Percentage(ComputedPercentage(val))) => { val == 0.5 }, _ => false, }; let y = match *y { OriginComponent::Side(Y::Top) => true, OriginComponent::Length(LengthOrPercentage::Percentage(ComputedPercentage(val))) => { val == 0.0 }, _ => false, }; x && y }, _ => false, } } fn to_css<W>(&self, dest: &mut W, compat_mode: CompatMode) -> fmt::Result where W: fmt::Write { match *self { LineDirection::Angle(angle) => { angle.to_css(dest) }, LineDirection::Horizontal(x) => { if compat_mode == CompatMode::Modern { dest.write_str("to ")?; } x.to_css(dest) }, LineDirection::Vertical(y) => { if compat_mode == CompatMode::Modern { dest.write_str("to ")?; } y.to_css(dest) }, LineDirection::Corner(x, y) => { if compat_mode == CompatMode::Modern { dest.write_str("to ")?; } x.to_css(dest)?; dest.write_str(" ")?; y.to_css(dest) }, #[cfg(feature = "gecko")] LineDirection::MozPosition(ref position, ref angle) => { let mut need_space = false; if let Some(ref position) = *position { position.to_css(dest)?; need_space = true; } if let Some(ref angle) = *angle { if need_space { dest.write_str(" ")?; } angle.to_css(dest)?; } Ok(()) }, } } } impl LineDirection { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>, compat_mode: &mut CompatMode) -> Result<Self, ParseError<'i>> { let mut _angle = if *compat_mode == CompatMode::Moz { input.try(|i| Angle::parse(context, i)).ok() } else { if let Ok(angle) = input.try(|i| Angle::parse_with_unitless(context, i)) { return Ok(LineDirection::Angle(angle)); } None }; input.try(|i| { let to_ident = i.try(|i| i.expect_ident_matching("to")); match *compat_mode { // `to` keyword is mandatory in modern syntax. CompatMode::Modern => to_ident?, // Fall back to Modern compatibility mode in case there is a `to` keyword. // According to Gecko, `-moz-linear-gradient(to ...)` should serialize like // `linear-gradient(to ...)`. CompatMode::Moz if to_ident.is_ok() => *compat_mode = CompatMode::Modern, // There is no `to` keyword in webkit prefixed syntax. If it's consumed, // parsing should throw an error. CompatMode::WebKit if to_ident.is_ok() => { return Err(i.new_custom_error(SelectorParseErrorKind::UnexpectedIdent("to".into()))) }, _ => {}, } #[cfg(feature = "gecko")] { // `-moz-` prefixed linear gradient can be both Angle and Position. 
if *compat_mode == CompatMode::Moz { let position = i.try(|i| LegacyPosition::parse(context, i)).ok(); if _angle.is_none() { _angle = i.try(|i| Angle::parse(context, i)).ok(); }; if _angle.is_none() && position.is_none() { return Err(i.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } return Ok(LineDirection::MozPosition(position, _angle)); } } if let Ok(x) = i.try(X::parse) { if let Ok(y) = i.try(Y::parse) { return Ok(LineDirection::Corner(x, y)); } return Ok(LineDirection::Horizontal(x)); } let y = Y::parse(i)?; if let Ok(x) = i.try(X::parse) { return Ok(LineDirection::Corner(x, y)); } Ok(LineDirection::Vertical(y)) }) } } #[cfg(feature = "gecko")] impl ToComputedValue for GradientPosition { type ComputedValue = ComputedPosition; fn to_computed_value(&self, context: &Context) -> ComputedPosition { match *self { GradientPosition::Modern(ref pos) => pos.to_computed_value(context), GradientPosition::Legacy(ref pos) => pos.to_computed_value(context), } } fn from_computed_value(computed: &ComputedPosition) -> Self { GradientPosition::Modern(ToComputedValue::from_computed_value(computed)) } } impl EndingShape { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>, compat_mode: CompatMode) -> Result<Self, ParseError<'i>> { if let Ok(extent) = input.try(|i| ShapeExtent::parse_with_compat_mode(i, compat_mode)) { if input.try(|i| i.expect_ident_matching("circle")).is_ok() { return Ok(GenericEndingShape::Circle(Circle::Extent(extent))); } let _ = input.try(|i| i.expect_ident_matching("ellipse")); return Ok(GenericEndingShape::Ellipse(Ellipse::Extent(extent))); } if input.try(|i| i.expect_ident_matching("circle")).is_ok() { if let Ok(extent) = input.try(|i| ShapeExtent::parse_with_compat_mode(i, compat_mode)) { return Ok(GenericEndingShape::Circle(Circle::Extent(extent))); } if compat_mode == CompatMode::Modern { if let Ok(length) = input.try(|i| Length::parse(context, i)) { return Ok(GenericEndingShape::Circle(Circle::Radius(length))); } } return Ok(GenericEndingShape::Circle(Circle::Extent(ShapeExtent::FarthestCorner))); } if input.try(|i| i.expect_ident_matching("ellipse")).is_ok() { if let Ok(extent) = input.try(|i| ShapeExtent::parse_with_compat_mode(i, compat_mode)) { return Ok(GenericEndingShape::Ellipse(Ellipse::Extent(extent))); } if compat_mode == CompatMode::Modern { let pair: Result<_, ParseError> = input.try(|i| { let x = LengthOrPercentage::parse(context, i)?; let y = LengthOrPercentage::parse(context, i)?; Ok((x, y)) }); if let Ok((x, y)) = pair { return Ok(GenericEndingShape::Ellipse(Ellipse::Radii(x, y))); } } return Ok(GenericEndingShape::Ellipse(Ellipse::Extent(ShapeExtent::FarthestCorner))); } // -moz- prefixed radial gradient doesn't allow EndingShape's Length or LengthOrPercentage // to come before shape keyword. Otherwise it conflicts with <position>. 
if compat_mode != CompatMode::Moz { if let Ok(length) = input.try(|i| Length::parse(context, i)) { if let Ok(y) = input.try(|i| LengthOrPercentage::parse(context, i)) { if compat_mode == CompatMode::Modern { let _ = input.try(|i| i.expect_ident_matching("ellipse")); } return Ok(GenericEndingShape::Ellipse(Ellipse::Radii(length.into(), y))); } if compat_mode == CompatMode::Modern { let y = input.try(|i| { i.expect_ident_matching("ellipse")?; LengthOrPercentage::parse(context, i) }); if let Ok(y) = y { return Ok(GenericEndingShape::Ellipse(Ellipse::Radii(length.into(), y))); } let _ = input.try(|i| i.expect_ident_matching("circle")); } return Ok(GenericEndingShape::Circle(Circle::Radius(length))); } } input.try(|i| { let x = Percentage::parse(context, i)?; let y = if let Ok(y) = i.try(|i| LengthOrPercentage::parse(context, i)) { if compat_mode == CompatMode::Modern { let _ = i.try(|i| i.expect_ident_matching("ellipse")); } y } else { if compat_mode == CompatMode::Modern { i.expect_ident_matching("ellipse")?; } LengthOrPercentage::parse(context, i)? }; Ok(GenericEndingShape::Ellipse(Ellipse::Radii(x.into(), y))) }) } } impl ShapeExtent { fn parse_with_compat_mode<'i, 't>(input: &mut Parser<'i, 't>, compat_mode: CompatMode) -> Result<Self, ParseError<'i>> { match Self::parse(input)? { ShapeExtent::Contain | ShapeExtent::Cover if compat_mode == CompatMode::Modern => { Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)) }, ShapeExtent::Contain => Ok(ShapeExtent::ClosestSide), ShapeExtent::Cover => Ok(ShapeExtent::FarthestCorner), keyword => Ok(keyword), } } } impl GradientItem { fn parse_comma_separated<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Vec<Self>, ParseError<'i>> { let mut seen_stop = false; let items = input.parse_comma_separated(|input| { if seen_stop { if let Ok(hint) = input.try(|i| LengthOrPercentage::parse(context, i)) { seen_stop = false; return Ok(GenericGradientItem::InterpolationHint(hint)); } } seen_stop = true; ColorStop::parse(context, input).map(GenericGradientItem::ColorStop) })?; if !seen_stop || items.len() < 2 { return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)); } Ok(items) } } impl Parse for ColorStop { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { Ok(ColorStop { color: RGBAColor::parse(context, input)?, position: input.try(|i| LengthOrPercentage::parse(context, i)).ok(), }) } } impl Parse for PaintWorklet { fn parse<'i, 't>( _context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { input.expect_function_matching("paint")?; input.parse_nested_block(|input| { let name = Atom::from(&**input.expect_ident()?); let arguments = input.try(|input| { input.expect_comma()?; input.parse_comma_separated(|input| SpecifiedValue::parse(input)) }).unwrap_or(vec![]); Ok(PaintWorklet { name, arguments }) }) } } impl Parse for MozImageRect { fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { input.try(|i| i.expect_function_matching("-moz-image-rect"))?; input.parse_nested_block(|i| { let string = i.expect_url_or_string()?; let url = SpecifiedUrl::parse_from_string(string.as_ref().to_owned(), context)?; i.expect_comma()?; let top = NumberOrPercentage::parse_non_negative(context, i)?; i.expect_comma()?; let right = NumberOrPercentage::parse_non_negative(context, i)?; i.expect_comma()?; let bottom = NumberOrPercentage::parse_non_negative(context, i)?; i.expect_comma()?; let left = 
NumberOrPercentage::parse_non_negative(context, i)?; Ok(MozImageRect { url: url, top: top, right: right, bottom: bottom, left: left, }) }) } }<|fim▁end|>
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
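Each record in this dump pairs a fill-in-the-middle prompt with its completion; in the image.rs row the hole sits at the very top of the file, so the completion is the MPL header shown above. A small helper, written against the marker strings visible in these records, that splices a pair back into the original source:

# Marker strings exactly as they appear in the records above.
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"

def splice(prompt: str, completion: str) -> str:
    """Rebuild the original file from one prompt/completion row."""
    # Keep only the text between the begin and end markers.
    body = prompt.split(FIM_BEGIN, 1)[1].rsplit(FIM_END, 1)[0]
    # The completion replaces the single hole marker.
    prefix, suffix = body.split(FIM_HOLE, 1)
    return prefix + completion + suffix

For the image.rs row, splice() puts the license block back in front of the leading //! doc comments.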
<|file_name|>_type.py<|end_file_name|><|fim▁begin|># ---------------------------------------------------------------------------- # Copyright (c) 2016-2021, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from qiime2.plugin import SemanticType from ..plugin_setup import plugin from . import AlphaDiversityDirectoryFormat<|fim▁hole|> AlphaDiversity = SemanticType('AlphaDiversity', variant_of=SampleData.field['type']) plugin.register_semantic_types(SampleData, AlphaDiversity) plugin.register_semantic_type_to_format( SampleData[AlphaDiversity], artifact_format=AlphaDiversityDirectoryFormat )<|fim▁end|>
SampleData = SemanticType('SampleData', field_names='type')
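Spliced together, the _type.py row defines and registers a parameterized semantic type. A short sketch of the composition it enables, assuming a working qiime2 environment; the final print shows the composed type expression that the row registers:

from qiime2.plugin import SemanticType

# Mirrors the row above: the completion (SampleData) plus the prompt (AlphaDiversity).
SampleData = SemanticType('SampleData', field_names='type')
AlphaDiversity = SemanticType('AlphaDiversity',
                              variant_of=SampleData.field['type'])

# Composing the two yields the expression registered to the directory format.
print(SampleData[AlphaDiversity])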
<|file_name|>JMathPlotter3D.java<|end_file_name|><|fim▁begin|>/* * RapidMiner * * Copyright (C) 2001-2014 by RapidMiner and the contributors * * Complete list of developers available at our web site: * * http://rapidminer.com * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by<|fim▁hole|> * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see http://www.gnu.org/licenses/. */ package com.rapidminer.gui.plotter.mathplot; import org.math.plot.Plot3DPanel; import org.math.plot.PlotPanel; import com.rapidminer.datatable.DataTable; import com.rapidminer.gui.plotter.PlotterConfigurationModel; /** The abstract super class for all 3D plotters using the JMathPlot library. * * @author Ingo Mierswa */ public abstract class JMathPlotter3D extends JMathPlotter { private static final long serialVersionUID = -8695197842788069313L; public JMathPlotter3D(PlotterConfigurationModel settings) { super(settings); } public JMathPlotter3D(PlotterConfigurationModel settings, DataTable dataTable) { super(settings, dataTable); } @Override public PlotPanel createPlotPanel() { return new Plot3DPanel(); } @Override public int getNumberOfOptionIcons() { return 5; } }<|fim▁end|>
* the Free Software Foundation, either version 3 of the License, or
<|file_name|>sokoban.rs<|end_file_name|><|fim▁begin|>#![crate_type = "bin"] #![allow(unused_must_use)] //extern crate native; extern crate libc; use std::from_str::{FromStr}; use std::io::{File}; use std::io::stdio::{stdin}; use std::path::{Path}; use std::os; use sokoboard::{SokoBoard}; use sokoannotatedboard::{SokoAnnotatedBoard, do_sylvan}; mod raw; mod bdd; mod sokoboard; mod sokoannotatedboard; fn main() { let args = os::args(); let contents; if args.len() > 1 { contents = File::open(&Path::new(args[1].as_slice())).read_to_str();<|fim▁hole|> contents = stdin().read_to_str(); println!("Reading from stdin."); } let board: SokoBoard = FromStr::from_str( contents.unwrap() ) .expect("Invalid sokoban board"); let annotated = SokoAnnotatedBoard::fromSokoBoard(board); do_sylvan(&annotated); }<|fim▁end|>
println!("Reading from file."); } else {
<|file_name|>layout.js<|end_file_name|><|fim▁begin|>window.Lunchiatto.module('Transfer', function(Transfer, App, Backbone, Marionette, $, _) { return Transfer.Layout = Marionette.LayoutView.extend({ template: 'transfers/layout', ui: { receivedTransfers: '.received-transfers', submittedTransfers: '.submitted-transfers' }, behaviors: { Animateable: { types: ['fadeIn'] }, Titleable: {} }, regions: { receivedTransfers: '@ui.receivedTransfers', submittedTransfers: '@ui.submittedTransfers' }, onRender() {<|fim▁hole|> }, _showTransfers(type) { const transfers = new App.Entities.Transfers([], {type}); transfers.optionedFetch({ success: transfers => { App.getUsers().then(() => { const view = new App.Transfer.List({ collection: transfers}); this[`${type}Transfers`].show(view); }); } }); }, _htmlTitle() { return 'Transfers'; } }); });<|fim▁end|>
this._showTransfers('received'); this._showTransfers('submitted');
<|file_name|>nonblock_server.cpp<|end_file_name|><|fim▁begin|>#include "gen-cpp/LogSender.h" #include <thrift/protocol/TBinaryProtocol.h> #include <thrift/server/TNonblockingServer.h> #include <thrift/transport/TServerSocket.h> #include <thrift/transport/TBufferTransports.h> #include <thrift/concurrency/PosixThreadFactory.h> #include <cstdio> #include <ctime> #include <unistd.h> using namespace ::apache::thrift; using namespace ::apache::thrift::protocol; using namespace ::apache::thrift::transport; using namespace ::apache::thrift::server; using namespace ::apache::thrift::concurrency; using boost::shared_ptr; #define THREAD_NUM 5 std::map<std::string, std::string> logMap; class LogSenderHandler : virtual public LogSenderIf { public: LogSenderHandler() { // Your initialization goes here } void SendLog(const std::vector<LogInfo> & loglist) { // Your implementation goes here sleep(5); time_t now = time(NULL); printf("SendLog, now = %s\n", ctime(&now)); for (size_t i = 0; i < loglist.size(); ++i) { if (logMap.find(loglist[i].name) == logMap.end()) { logMap.insert(std::make_pair(loglist[i].name, loglist[i].content)); } } } void GetLog(std::string& _return, const std::string& logname) { // Your implementation goes here std::map<std::string,std::string>::iterator iter = logMap.find(logname); if (iter != logMap.end()) { _return = iter->second; } else { _return = "Not Found!"; } } }; int main(int argc, char **argv) { int port = 9090; shared_ptr<LogSenderHandler> handler(new LogSenderHandler()); shared_ptr<TProcessor> processor(new LogSenderProcessor(handler)); shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory()); shared_ptr<ThreadManager> threadManager = ThreadManager::newSimpleThreadManager(THREAD_NUM); shared_ptr<PosixThreadFactory> threadFactory = shared_ptr<PosixThreadFactory> (new PosixThreadFactory()); threadManager->threadFactory(threadFactory); threadManager->start();<|fim▁hole|> TNonblockingServer server(processor, protocolFactory, port, threadManager); server.serve(); return 0; }<|fim▁end|>
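The row above serves LogSender through a TNonblockingServer, which requires framed transport on the wire. A hedged Python client sketch against the same service; the gen-py package name (logsender) is an assumption that depends on the namespace declared in the Thrift IDL:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from logsender import LogSender  # hypothetical gen-py module path

socket = TSocket.TSocket('localhost', 9090)
transport = TTransport.TFramedTransport(socket)  # TNonblockingServer expects framing
client = LogSender.Client(TBinaryProtocol.TBinaryProtocol(transport))

transport.open()
print(client.GetLog('app.log'))  # returns "Not Found!" until a log is sent
transport.close()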
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals from future.builtins import str from datetime import datetime import re try: from urllib.parse import quote except ImportError: # Python 2 from urllib import quote from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.html import urlize from django.utils.timezone import make_aware, utc from django.utils.translation import ugettext_lazy as _ from requests_oauthlib import OAuth1 import requests from mezzanine.conf import settings from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \ QUERY_TYPE_LIST, QUERY_TYPE_SEARCH from mezzanine.twitter import get_auth_settings from mezzanine.twitter.managers import TweetManager re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE) re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE) replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>" replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>" class TwitterQueryException(Exception): pass @python_2_unicode_compatible class Query(models.Model): type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES, max_length=10) value = models.CharField(_("Value"), max_length=140) interested = models.BooleanField("Interested", default=True) class Meta: verbose_name = _("Twitter query") verbose_name_plural = _("Twitter queries") ordering = ("-id",) def __str__(self): return "%s: %s" % (self.get_type_display(), self.value) def run(self): """ Request new tweets from the Twitter API. """ try: value = quote(self.value) except KeyError: value = self.value urls = { QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/" "user_timeline.json?screen_name=%s" "&include_rts=true" % value.lstrip("@")),<|fim▁hole|> "?list_id=%s&include_rts=true" % value), QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json" "?q=%s" % value, } try: url = urls[self.type] except KeyError: raise TwitterQueryException("Invalid query type: %s" % self.type) settings.use_editable() auth_settings = get_auth_settings() if not auth_settings: from mezzanine.conf import registry if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]: # These are some read-only keys and secrets we use # for the default query (eg nothing has been configured) auth_settings = ( "KxZTRD3OBft4PP0iQW0aNQ", "sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ", "1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI", "r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R", ) else: raise TwitterQueryException("Twitter OAuth settings missing") try: tweets = requests.get(url, auth=OAuth1(*auth_settings)).json() except Exception as e: raise TwitterQueryException("Error retrieving: %s" % e) try: raise TwitterQueryException(tweets["errors"][0]["message"]) except (IndexError, KeyError, TypeError): pass if self.type == "search": tweets = tweets["statuses"] for tweet_json in tweets: remote_id = str(tweet_json["id"]) tweet, created = self.tweets.get_or_create(remote_id=remote_id) if not created: continue if "retweeted_status" in tweet_json: user = tweet_json['user'] tweet.retweeter_user_name = user["screen_name"] tweet.retweeter_full_name = user["name"] tweet.retweeter_profile_image_url = user["profile_image_url"] tweet_json = tweet_json["retweeted_status"] if self.type == QUERY_TYPE_SEARCH: tweet.user_name = tweet_json['user']['screen_name'] tweet.full_name = tweet_json['user']['name'] tweet.profile_image_url = \ tweet_json['user']["profile_image_url"] date_format = "%a %b %d %H:%M:%S 
+0000 %Y" else: user = tweet_json["user"] tweet.user_name = user["screen_name"] tweet.full_name = user["name"] tweet.profile_image_url = user["profile_image_url"] date_format = "%a %b %d %H:%M:%S +0000 %Y" tweet.text = urlize(tweet_json["text"]) tweet.text = re_usernames.sub(replace_usernames, tweet.text) tweet.text = re_hashtags.sub(replace_hashtags, tweet.text) if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False): chars = [ch for ch in tweet.text if ord(ch) < 0x800] tweet.text = ''.join(chars) d = datetime.strptime(tweet_json["created_at"], date_format) tweet.created_at = make_aware(d, utc) try: tweet.save() except Warning: pass tweet.save() self.interested = False self.save() class Tweet(models.Model): remote_id = models.CharField(_("Twitter ID"), max_length=50) created_at = models.DateTimeField(_("Date/time"), null=True) text = models.TextField(_("Message"), null=True) profile_image_url = models.URLField(_("Profile image URL"), null=True) user_name = models.CharField(_("User name"), max_length=100, null=True) full_name = models.CharField(_("Full name"), max_length=100, null=True) retweeter_profile_image_url = models.URLField( _("Profile image URL (Retweeted by)"), null=True) retweeter_user_name = models.CharField( _("User name (Retweeted by)"), max_length=100, null=True) retweeter_full_name = models.CharField( _("Full name (Retweeted by)"), max_length=100, null=True) query = models.ForeignKey("Query", related_name="tweets") objects = TweetManager() class Meta: verbose_name = _("Tweet") verbose_name_plural = _("Tweets") ordering = ("-created_at",) def __str__(self): return "%s: %s" % (self.user_name, self.text) def is_retweet(self): return self.retweeter_user_name is not None<|fim▁end|>
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
<|file_name|>118A - String Task.py<|end_file_name|><|fim▁begin|><|fim▁hole|>v = 'aeiouy' n = '' for c in word: if not c in v: n += '.' + c print n<|fim▁end|>
word = raw_input().lower()
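The 118A row is Python 2 (raw_input and the print statement); the same logic restated for Python 3, purely for comparison:

word = input().lower()
print(''.join('.' + c for c in word if c not in 'aeiouy'))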
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.views import generic from django.core.urlresolvers import reverse from django.core.urlresolvers import reverse_lazy from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import tabs from horizon import tables from billingdashboard.common import get_user_invoices from billingdashboard.dashboards.project.cust_invoice \ import tables as invoice_table from astutedashboard.common import get_invoices, get_invoice class IndexView(tables.DataTableView): table_class = invoice_table.UserInvoiceListingTable template_name = 'project/cust_invoice/index.html' page_title = _("Invoices") def get_data(self): return get_user_invoices(self.request, verbose=True) class UserInvoiceDetailsView(generic.TemplateView):<|fim▁hole|> def get_context_data(self, **kwargs): context = super(UserInvoiceDetailsView, self).get_context_data(**kwargs) id = self.kwargs['invoice_id'] context['invoice'] = get_invoice(self.request, id, verbose=True) return context<|fim▁end|>
template_name = 'project/cust_invoice/invoice.html'
<|file_name|>common.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Django settings for puput_demo project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ from __future__ import absolute_import, unicode_literals from puput import PUPUT_APPS import environ ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /) APPS_DIR = ROOT_DIR.path('puput-demo') env = environ.Env() # APP CONFIGURATION # ------------------------------------------------------------------------------ DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Admin 'django.contrib.admin', ) INSTALLED_APPS = DJANGO_APPS + PUPUT_APPS # MIDDLEWARE CONFIGURATION # ------------------------------------------------------------------------------ MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'wagtail.wagtailcore.middleware.SiteMiddleware', 'wagtail.wagtailredirects.middleware.RedirectMiddleware' ) # DEBUG # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = env.bool("DJANGO_DEBUG", False) # FIXTURE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( str(APPS_DIR.path('fixtures')), ) # EMAIL CONFIGURATION # ------------------------------------------------------------------------------ EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend') # DATABASE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = { # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ 'default': env.db("DATABASE_URL", default="postgres:///puput-demo"), } DATABASES['default']['ATOMIC_REQUESTS'] = True <|fim▁hole|># ------------------------------------------------------------------------------ # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. 
TIME_ZONE = 'UTC' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # TEMPLATE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES = [ { # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND 'BACKEND': 'django.template.backends.django.DjangoTemplates', # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs 'DIRS': [ str(APPS_DIR.path('templates')), ], 'OPTIONS': { # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug 'debug': DEBUG, # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types 'loaders': [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ], # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', # Your stuff: custom template context processors go here ], }, }, ] # STATIC FILE CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = str(ROOT_DIR('staticfiles')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # MEDIA CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = str(APPS_DIR('media')) # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' # URL Configuration # ------------------------------------------------------------------------------ ROOT_URLCONF = 'config.urls' # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'config.wsgi.application' # LOGGING CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
        },
    },
}

WAGTAIL_SITE_NAME = 'Demo'<|fim▁end|>
# GENERAL CONFIGURATION
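# A hedged usage sketch (not part of the original settings module): how the
# django-environ values above are typically supplied; the shell variable
# values below are illustrative.
#
#   DJANGO_DEBUG=true DATABASE_URL=postgres://user:pass@localhost/puput \
#       python manage.py runserver
#
# env.bool("DJANGO_DEBUG", False) parses the string "true" into True, and
# env.db("DATABASE_URL", ...) expands the URL into a DATABASES['default'] dict.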
<|file_name|>gruntfile.js<|end_file_name|><|fim▁begin|>module.exports = function(grunt) {

    // load all grunt tasks
    require('matchdep').filterDev('grunt-*').forEach(grunt.loadNpmTasks);

    grunt.initConfig({

        // watch for changes and trigger sass, jshint, uglify and livereload
        watch: {
            js: {
                files: ['jquery.accrue.js'],
                tasks: ['jshint','uglify'],
                options: {
                    livereload: true,
                },
            },
            css: {
                files: 'example.scss',
                tasks: ['sass'],
                options: {
                    livereload: true,
                },
            }
        },<|fim▁hole|>
        sass: {
            dist: {
                options: {
                    // nested, compact, compressed, expanded
                    style: 'compressed'
                },
                files: {
                    'example.css': 'example.scss'
                }
            }
        },

        // uglify to concat & minify
        uglify: {
            js: {
                files: {
                    'jquery.accrue.min.js': 'jquery.accrue.js',
                }
            }
        },

        // lint me.
        jshint: {
            all: ['jquery.accrue.js']
        }

    });

    // register task
    grunt.registerTask('default', ['watch']);
};<|fim▁end|>
// we use the Sass
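// A hedged usage sketch (the "build" alias is hypothetical, not part of the
// original gruntfile): a one-shot build task could be registered alongside
// the default watcher like this.
//
// grunt.registerTask('build', ['jshint', 'uglify', 'sass']);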
<|file_name|>DetectionEvents.d.ts<|end_file_name|><|fim▁begin|>export declare enum DetectionEvents { /** * Event triggered by a audio detector indicating that its active state has changed from active to inactive or vice * versa. * @event * @type {boolean} - true when service has changed to active false otherwise. */ DETECTOR_STATE_CHANGE = "detector_state_change", /** Event triggered by {@link NoAudioSignalDetector} when the local audio device associated with a JitsiConference * starts receiving audio levels with the value of 0 meaning no audio is being captured on that device, or when * it starts receiving audio levels !== 0 after being in a state of no audio. * @event * @type {boolean} - true when the current conference audio track has audio input false otherwise. */ AUDIO_INPUT_STATE_CHANGE = "audio_input_state_changed", /** Event triggered by NoAudioSignalDetector when the local audio device associated with a JitsiConference goes silent * for a period of time, meaning that the device is either broken or hardware/software muted. * @event * @type {void}<|fim▁hole|> */ NO_AUDIO_INPUT = "no_audio_input_detected", /** * Event generated by {@link VADNoiseDetection} when the tracked device is considered noisy. * @event * @type {Object} */ VAD_NOISY_DEVICE = "detection.vad_noise_device", /** * Event generated by VADReportingService when if finishes creating a VAD report for the monitored devices. * The generated objects are of type Array<Object>, one score for each monitored device. * @event VAD_REPORT_PUBLISHED * @type Array<Object> with the following structure: * @property {Date} timestamp - Timestamp at which the compute took place. * @property {number} avgVAD - Average VAD score over monitored period of time. * @property {string} deviceId - Associate local audio device ID. */ VAD_REPORT_PUBLISHED = "vad-report-published", /** * Event generated by {@link TrackVADEmitter} when PCM sample VAD score is available. * * @event * @type {Object} * @property {Date} timestamp - Exact time at which processed PCM sample was generated. * @property {number} score - VAD score on a scale from 0 to 1 (i.e. 0.7) * @property {Float32Array} pcmData - Raw PCM data with which the VAD score was calculated. * @property {string} deviceId - Device id of the associated track. */ VAD_SCORE_PUBLISHED = "detection.vad_score_published", /** * Event generated by {@link VADTalkMutedDetection} when a user is talking while the mic is muted. * * @event * @type {Object} */ VAD_TALK_WHILE_MUTED = "detection.vad_talk_while_muted" } export declare const DETECTOR_STATE_CHANGE = DetectionEvents.DETECTOR_STATE_CHANGE; export declare const AUDIO_INPUT_STATE_CHANGE = DetectionEvents.AUDIO_INPUT_STATE_CHANGE; export declare const NO_AUDIO_INPUT = DetectionEvents.NO_AUDIO_INPUT; export declare const VAD_NOISY_DEVICE = DetectionEvents.VAD_NOISY_DEVICE; export declare const VAD_REPORT_PUBLISHED = DetectionEvents.VAD_REPORT_PUBLISHED; export declare const VAD_SCORE_PUBLISHED = DetectionEvents.VAD_SCORE_PUBLISHED; export declare const VAD_TALK_WHILE_MUTED = DetectionEvents.VAD_TALK_WHILE_MUTED;<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># # Extensible User Folder # # (C) Copyright 2000-2004 The Internet (Aust) Pty Ltd # ACN: 082 081 472 ABN: 83 082 081 472 # All Rights Reserved # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. # # Author: Andrew Milton <[email protected]> # $Id: __init__.py,v 1.1 2004/11/10 14:15:53 akm Exp $<|fim▁hole|> # If this fails due to NUG being absent, just skip it try: import zodbGroupSource except ImportError: pass<|fim▁end|>
import nullGroupSource
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|># # Copyright (c) 2016 SUSE Linux GmbH #<|fim▁hole|># # dbxincluder is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # dbxincluder is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with dbxincluder. If not, see <http://www.gnu.org/licenses/>. import glob import os import pytest def pytest_generate_tests(metafunc): """Replace the xmltestcases fixture by all *.case.xml files in tests/cases""" if "xmltestcase" in metafunc.fixturenames: location = os.path.dirname(os.path.realpath(__file__)) testcases = glob.glob(location + "/cases/*.case.xml") testcases.sort() # Sort them alphabetically metafunc.parametrize("xmltestcase", testcases)<|fim▁end|>
# This file is part of dbxincluder.
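# A hedged sketch of a consumer (hypothetical test, not part of the original
# suite): pytest invokes it once per tests/cases/*.case.xml file collected by
# pytest_generate_tests above.
#
# def test_case_file_is_readable(xmltestcase):
#     with open(xmltestcase, "rb") as case_file:
#         assert case_file.read()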
<|file_name|>hello_triangle.rs<|end_file_name|><|fim▁begin|>extern crate bootstrap_rs as bootstrap; extern crate polygon; use bootstrap::window::*; use polygon::*; use polygon::anchor::*; use polygon::camera::*; use polygon::math::*; use polygon::mesh_instance::*; use polygon::geometry::mesh::*; static VERTEX_POSITIONS: [f32; 12] = [ -1.0, -1.0, 0.0, 1.0, 1.0, -1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, ]; static INDICES: [u32; 3] = [0, 1, 2]; fn main() { // Open a window and create the renderer instance. let mut window = Window::new("Hello, Triangle!").unwrap(); let mut renderer = RendererBuilder::new(&window).build(); // Build a triangle mesh. let mesh = MeshBuilder::new()<|fim▁hole|> // Send the mesh to the GPU. let gpu_mesh = renderer.register_mesh(&mesh); // Create an anchor and register it with the renderer. let anchor = Anchor::new(); let anchor_id = renderer.register_anchor(anchor); // Setup the material for the mesh. let mut material = renderer.default_material(); material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0)); // Create a mesh instance, attach it to the anchor, and register it. let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material); mesh_instance.set_anchor(anchor_id); renderer.register_mesh_instance(mesh_instance); // Create a camera and an anchor for it. let mut camera_anchor = Anchor::new(); camera_anchor.set_position(Point::new(0.0, 0.0, 10.0)); let camera_anchor_id = renderer.register_anchor(camera_anchor); let mut camera = Camera::default(); camera.set_anchor(camera_anchor_id); renderer.register_camera(camera); // Set ambient color to pure white so we don't need to worry about lighting. renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0)); 'outer: loop { while let Some(message) = window.next_message() { match message { Message::Close => break 'outer, _ => {}, } } // Rotate the triangle slightly. { let anchor = renderer.get_anchor_mut(anchor_id).unwrap(); let orientation = anchor.orientation(); anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005)); } // Render the mesh. renderer.draw(); } }<|fim▁end|>
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS)) .set_indices(&INDICES) .build() .unwrap();
<|file_name|>test_admin.py<|end_file_name|><|fim▁begin|>""" Tests course_creators.admin.py. """ import mock import django from django.contrib.admin.sites import AdminSite from django.contrib.auth.models import User from django.core import mail from django.http import HttpRequest from django.test import TestCase from course_creators.admin import CourseCreatorAdmin from course_creators.models import CourseCreator from student import auth from student.roles import CourseCreatorRole def mock_render_to_string(template_name, context): """Return a string that encodes template_name and context""" return str((template_name, context)) class CourseCreatorAdminTest(TestCase): """ Tests for course creator admin. """<|fim▁hole|> self.user = User.objects.create_user('test_user', '[email protected]', 'foo') self.table_entry = CourseCreator(user=self.user) self.table_entry.save() self.admin = User.objects.create_user('Mark', '[email protected]', 'foo') self.admin.is_staff = True self.request = HttpRequest() self.request.user = self.admin self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite()) self.studio_request_email = '[email protected]' self.enable_creator_group_patch = { "ENABLE_CREATOR_GROUP": True, "STUDIO_REQUEST_EMAIL": self.studio_request_email } @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True)) @mock.patch('django.contrib.auth.models.User.email_user') def test_change_status(self, email_user): """ Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent. """ def change_state_and_verify_email(state, is_creator): """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """ self._change_state(state) self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole())) context = {'studio_request_email': self.studio_request_email} if state == CourseCreator.GRANTED: template = 'emails/course_creator_granted.txt' elif state == CourseCreator.DENIED: template = 'emails/course_creator_denied.txt' else: template = 'emails/course_creator_revoked.txt' email_user.assert_called_with( mock_render_to_string('emails/course_creator_subject.txt', context), mock_render_to_string(template, context), self.studio_request_email ) with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch): # User is initially unrequested. self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole())) change_state_and_verify_email(CourseCreator.GRANTED, True) change_state_and_verify_email(CourseCreator.DENIED, False) change_state_and_verify_email(CourseCreator.GRANTED, True) change_state_and_verify_email(CourseCreator.PENDING, False) change_state_and_verify_email(CourseCreator.GRANTED, True) change_state_and_verify_email(CourseCreator.UNREQUESTED, False) change_state_and_verify_email(CourseCreator.DENIED, False) @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True)) def test_mail_admin_on_pending(self): """ Tests that the admin account is notified when a user is in the 'pending' state. """ def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user): """ Changes user state and verifies e-mail sent to admin address only when pending. """ mail.outbox = [] self._change_state(state) # If a message is sent to the user about course creator status change, it will be the first # message sent. Admin message will follow. 
base_num_emails = 1 if expect_sent_to_user else 0 if expect_sent_to_admin: # TODO: Remove Django 1.11 upgrade shim # SHIM: Usernames come back as unicode in 1.10+, remove this shim post-upgrade if django.VERSION < (1, 10): context = {'user_name': 'test_user', 'user_email': u'[email protected]'} else: context = {'user_name': u'test_user', 'user_email': u'[email protected]'} self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent') sent_mail = mail.outbox[base_num_emails] self.assertEquals( mock_render_to_string('emails/course_creator_admin_subject.txt', context), sent_mail.subject ) self.assertEquals( mock_render_to_string('emails/course_creator_admin_user_pending.txt', context), sent_mail.body ) self.assertEquals(self.studio_request_email, sent_mail.from_email) self.assertEqual([self.studio_request_email], sent_mail.to) else: self.assertEquals(base_num_emails, len(mail.outbox)) with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch): # E-mail message should be sent to admin only when new state is PENDING, regardless of what # previous state was (unless previous state was already PENDING). # E-mail message sent to user only on transition into and out of GRANTED state. check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False) check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False) check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True) check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True) check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True) check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True) check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False) check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True) def _change_state(self, state): """ Helper method for changing state """ self.table_entry.state = state self.creator_admin.save_model(self.request, self.table_entry, None, True) def test_add_permission(self): """ Tests that staff cannot add entries """ self.assertFalse(self.creator_admin.has_add_permission(self.request)) def test_delete_permission(self): """ Tests that staff cannot delete entries """ self.assertFalse(self.creator_admin.has_delete_permission(self.request)) def test_change_permission(self): """ Tests that only staff can change entries """ self.assertTrue(self.creator_admin.has_change_permission(self.request)) self.request.user = self.user self.assertFalse(self.creator_admin.has_change_permission(self.request)) def test_rate_limit_login(self): with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}): post_params = {'username': self.user.username, 'password': 'wrong_password'} # try logging in 30 times, the default limit in the number of failed # login attempts in one 5 minute period before the rate gets limited for _ in xrange(30): response = self.client.post('/admin/login/', post_params) self.assertEquals(response.status_code, 200) response = self.client.post('/admin/login/', post_params) # Since we are using the default rate limit behavior, we are # expecting this to return a 403 error to indicate that there have # been too many attempts self.assertEquals(response.status_code, 403)<|fim▁end|>
def setUp(self): """ Test case setup """ super(CourseCreatorAdminTest, self).setUp()
<|file_name|>popup.js<|end_file_name|><|fim▁begin|>//>>excludeStart("jqmBuildExclude", pragmas.jqmBuildExclude); //>>description: Popup windows //>>label: Popups //>>group: Widgets //>>css.theme: ../css/themes/default/jquery.mobile.theme.css //>>css.structure: ../css/structure/jquery.mobile.popup.css,../css/structure/jquery.mobile.transition.css,../css/structure/jquery.mobile.transition.fade.css // Lessons: // You must remove nav bindings even if there is no history. Make sure you // remove nav bindings in the same frame as the beginning of the close process // if there is no history. If there is history, remove nav bindings from the nav // bindings handler - that way, only one of them can fire per close process. define( [ "jquery", "../links", "../widget", "../support", "../events/navigate", "../navigation/path", "../navigation/history", "../navigation/navigator", "../navigation/method", "../animationComplete", "../navigation", "jquery-plugins/jquery.hashchange" ], function( jQuery ) { //>>excludeEnd("jqmBuildExclude"); (function( $, undefined ) { function fitSegmentInsideSegment( windowSize, segmentSize, offset, desired ) { var returnValue = desired; if ( windowSize < segmentSize ) { // Center segment if it's bigger than the window returnValue = offset + ( windowSize - segmentSize ) / 2; } else { // Otherwise center it at the desired coordinate while keeping it completely inside the window returnValue = Math.min( Math.max( offset, desired - segmentSize / 2 ), offset + windowSize - segmentSize ); } return returnValue; } function getWindowCoordinates( theWindow ) { return { x: theWindow.scrollLeft(), y: theWindow.scrollTop(), cx: ( theWindow[ 0 ].innerWidth || theWindow.width() ), cy: ( theWindow[ 0 ].innerHeight || theWindow.height() ) }; } $.widget( "mobile.popup", { options: { wrapperClass: null, theme: null, overlayTheme: null, shadow: true, corners: true, transition: "none", positionTo: "origin", tolerance: null, closeLinkSelector: "a:jqmData(rel='back')", closeLinkEvents: "click.popup", navigateEvents: "navigate.popup", closeEvents: "navigate.popup pagebeforechange.popup", dismissible: true, enhanced: false, // NOTE Windows Phone 7 has a scroll position caching issue that // requires us to disable popup history management by default // https://github.com/jquery/jquery-mobile/issues/4784 // // NOTE this option is modified in _create! history: !$.mobile.browser.oldIE }, _create: function() { var theElement = this.element, myId = theElement.attr( "id" ), currentOptions = this.options; // We need to adjust the history option to be false if there's no AJAX nav. // We can't do it in the option declarations because those are run before // it is determined whether there shall be AJAX nav. 
currentOptions.history = currentOptions.history && $.mobile.ajaxEnabled && $.mobile.hashListeningEnabled; // Define instance variables $.extend( this, { _scrollTop: 0, _page: theElement.closest( ".ui-page" ), _ui: null, _fallbackTransition: "", _currentTransition: false, _prerequisites: null, _isOpen: false, _tolerance: null, _resizeData: null, _ignoreResizeTo: 0, _orientationchangeInProgress: false }); if ( this._page.length === 0 ) { this._page = $( "body" ); } if ( currentOptions.enhanced ) { this._ui = { container: theElement.parent(), screen: theElement.parent().prev(), placeholder: $( this.document[ 0 ].getElementById( myId + "-placeholder" ) ) }; } else { this._ui = this._enhance( theElement, myId ); this._applyTransition( currentOptions.transition ); } this ._setTolerance( currentOptions.tolerance ) ._ui.focusElement = this._ui.container; // Event handlers this._on( this._ui.screen, { "vclick": "_eatEventAndClose" } ); this._on( this.window, { orientationchange: $.proxy( this, "_handleWindowOrientationchange" ), resize: $.proxy( this, "_handleWindowResize" ), keyup: $.proxy( this, "_handleWindowKeyUp" ) }); this._on( this.document, { "focusin": "_handleDocumentFocusIn" } ); }, _enhance: function( theElement, myId ) { var currentOptions = this.options, wrapperClass = currentOptions.wrapperClass, ui = { screen: $( "<div class='ui-screen-hidden ui-popup-screen " + this._themeClassFromOption( "ui-overlay-", currentOptions.overlayTheme ) + "'></div>" ), placeholder: $( "<div style='display: none;'><!-- placeholder --></div>" ), container: $( "<div class='ui-popup-container ui-popup-hidden ui-popup-truncate" + ( wrapperClass ? ( " " + wrapperClass ) : "" ) + "'></div>" ) }, fragment = this.document[ 0 ].createDocumentFragment(); fragment.appendChild( ui.screen[ 0 ] ); fragment.appendChild( ui.container[ 0 ] ); if ( myId ) { ui.screen.attr( "id", myId + "-screen" ); ui.container.attr( "id", myId + "-popup" ); ui.placeholder .attr( "id", myId + "-placeholder" ) .html( "<!-- placeholder for " + myId + " -->" ); } // Apply the proto this._page[ 0 ].appendChild( fragment ); // Leave a placeholder where the element used to be ui.placeholder.insertAfter( theElement ); theElement .detach() .addClass( "ui-popup " + this._themeClassFromOption( "ui-body-", currentOptions.theme ) + " " + ( currentOptions.shadow ? "ui-overlay-shadow " : "" ) + ( currentOptions.corners ? "ui-corner-all " : "" ) ) .appendTo( ui.container ); return ui; }, _eatEventAndClose: function( theEvent ) { theEvent.preventDefault(); theEvent.stopImmediatePropagation(); if ( this.options.dismissible ) { this.close(); } return false; }, // Make sure the screen covers the entire document - CSS is sometimes not // enough to accomplish this. 
_resizeScreen: function() {
		var screen = this._ui.screen,
			popupHeight = this._ui.container.outerHeight( true ),
			screenHeight = screen.removeAttr( "style" ).height(),

			// Subtracting 1 here is necessary for an obscure Android 4.0 bug where
			// the browser hangs if the screen covers the entire document :/
			documentHeight = this.document.height() - 1;

		if ( screenHeight < documentHeight ) {
			screen.height( documentHeight );
		} else if ( popupHeight > screenHeight ) {
			screen.height( popupHeight );
		}
	},

	_handleWindowKeyUp: function( theEvent ) {
		if ( this._isOpen && theEvent.keyCode === $.mobile.keyCode.ESCAPE ) {
			return this._eatEventAndClose( theEvent );
		}
	},

	_expectResizeEvent: function() {
		var windowCoordinates = getWindowCoordinates( this.window );

		if ( this._resizeData ) {
			if ( windowCoordinates.x === this._resizeData.windowCoordinates.x &&
				windowCoordinates.y === this._resizeData.windowCoordinates.y &&
				windowCoordinates.cx === this._resizeData.windowCoordinates.cx &&
				windowCoordinates.cy === this._resizeData.windowCoordinates.cy ) {
				// timeout not refreshed
				return false;
			} else {
				// clear existing timeout - it will be refreshed below
				clearTimeout( this._resizeData.timeoutId );
			}
		}

		this._resizeData = {
			timeoutId: this._delay( "_resizeTimeout", 200 ),
			windowCoordinates: windowCoordinates
		};

		return true;
	},

	_resizeTimeout: function() {
		if ( this._isOpen ) {
			if ( !this._expectResizeEvent() ) {
				if ( this._ui.container.hasClass( "ui-popup-hidden" ) ) {
					// effectively rapid-open the popup while leaving the screen intact
					this._ui.container.removeClass( "ui-popup-hidden ui-popup-truncate" );
					this.reposition( { positionTo: "window" } );
					this._ignoreResizeEvents();
				}

				this._resizeScreen();
				this._resizeData = null;
				this._orientationchangeInProgress = false;
			}
		} else {
			this._resizeData = null;
			this._orientationchangeInProgress = false;
		}
	},

	_stopIgnoringResizeEvents: function() {
		this._ignoreResizeTo = 0;
	},

	_ignoreResizeEvents: function() {
		if ( this._ignoreResizeTo ) {
			clearTimeout( this._ignoreResizeTo );
		}
		this._ignoreResizeTo = this._delay( "_stopIgnoringResizeEvents", 1000 );
	},

	_handleWindowResize: function(/* theEvent */) {
		if ( this._isOpen && this._ignoreResizeTo === 0 ) {
			if ( ( this._expectResizeEvent() || this._orientationchangeInProgress ) &&
				!this._ui.container.hasClass( "ui-popup-hidden" ) ) {
				// effectively rapid-close the popup while leaving the screen intact
				this._ui.container
					.addClass( "ui-popup-hidden ui-popup-truncate" )
					.removeAttr( "style" );
			}
		}
	},

	_handleWindowOrientationchange: function(/* theEvent */) {
		if ( !this._orientationchangeInProgress && this._isOpen && this._ignoreResizeTo === 0 ) {
			this._expectResizeEvent();
			this._orientationchangeInProgress = true;
		}
	},

	// When the popup is open, attempting to focus on an element that is not a
	// child of the popup will redirect focus to the popup
	_handleDocumentFocusIn: function( theEvent ) {
		var target,
			targetElement = theEvent.target,
			ui = this._ui;

		if ( !this._isOpen ) {
			return;
		}

		if ( targetElement !== ui.container[ 0 ] ) {
			target = $( targetElement );
			if ( 0 === target.parents().filter( ui.container[ 0 ] ).length ) {
				$( this.document[ 0 ].activeElement ).one( "focus", function(/* theEvent */) {
					target.blur();
				});
				ui.focusElement.focus();
				theEvent.preventDefault();
				theEvent.stopImmediatePropagation();
				return false;
			} else if ( ui.focusElement[ 0 ] === ui.container[ 0 ] ) {
				ui.focusElement = target;
			}
		}

		this._ignoreResizeEvents();
	},

	_themeClassFromOption: function( prefix, value ) {
		return ( value ? ( value === "none" ?
"" : ( prefix + value ) ) : ( prefix + "inherit" ) ); }, _applyTransition: function( value ) { if ( value ) { this._ui.container.removeClass( this._fallbackTransition ); if ( value !== "none" ) { this._fallbackTransition = $.mobile._maybeDegradeTransition( value ); if ( this._fallbackTransition === "none" ) { this._fallbackTransition = ""; } this._ui.container.addClass( this._fallbackTransition ); } } return this; }, _setOptions: function( newOptions ) { var currentOptions = this.options, theElement = this.element, screen = this._ui.screen; if ( newOptions.wrapperClass !== undefined ) { this._ui.container .removeClass( currentOptions.wrapperClass ) .addClass( newOptions.wrapperClass ); } if ( newOptions.theme !== undefined ) { theElement .removeClass( this._themeClassFromOption( "ui-body-", currentOptions.theme ) ) .addClass( this._themeClassFromOption( "ui-body-", newOptions.theme ) ); } if ( newOptions.overlayTheme !== undefined ) { screen .removeClass( this._themeClassFromOption( "ui-overlay-", currentOptions.overlayTheme ) ) .addClass( this._themeClassFromOption( "ui-overlay-", newOptions.overlayTheme ) ); if ( this._isOpen ) { screen.addClass( "in" ); } } if ( newOptions.shadow !== undefined ) { theElement.toggleClass( "ui-overlay-shadow", newOptions.shadow ); } if ( newOptions.corners !== undefined ) { theElement.toggleClass( "ui-corner-all", newOptions.corners ); } if ( newOptions.transition !== undefined ) { if ( !this._currentTransition ) { this._applyTransition( newOptions.transition ); } } if ( newOptions.tolerance !== undefined ) { this._setTolerance( newOptions.tolerance ); } if ( newOptions.disabled !== undefined ) { if ( newOptions.disabled ) { this.close(); } } return this._super( newOptions ); }, _setTolerance: function( value ) { var tol = { t: 30, r: 15, b: 30, l: 15 }, ar; if ( value !== undefined ) { ar = String( value ).split( "," ); $.each( ar, function( idx, val ) { ar[ idx ] = parseInt( val, 10 ); } ); switch( ar.length ) { // All values are to be the same case 1: if ( !isNaN( ar[ 0 ] ) ) { tol.t = tol.r = tol.b = tol.l = ar[ 0 ]; } break; // The first value denotes top/bottom tolerance, and the second value denotes left/right tolerance case 2: if ( !isNaN( ar[ 0 ] ) ) { tol.t = tol.b = ar[ 0 ]; } if ( !isNaN( ar[ 1 ] ) ) { tol.l = tol.r = ar[ 1 ]; } break; // The array contains values in the order top, right, bottom, left case 4: if ( !isNaN( ar[ 0 ] ) ) { tol.t = ar[ 0 ]; } if ( !isNaN( ar[ 1 ] ) ) { tol.r = ar[ 1 ]; } if ( !isNaN( ar[ 2 ] ) ) { tol.b = ar[ 2 ]; } if ( !isNaN( ar[ 3 ] ) ) { tol.l = ar[ 3 ]; } break; default: break; } } this._tolerance = tol; return this; }, _clampPopupWidth: function( infoOnly ) { var menuSize, windowCoordinates = getWindowCoordinates( this.window ), // rectangle within which the popup must fit rectangle = { x: this._tolerance.l, y: windowCoordinates.y + this._tolerance.t, cx: windowCoordinates.cx - this._tolerance.l - this._tolerance.r, cy: windowCoordinates.cy - this._tolerance.t - this._tolerance.b }; if ( !infoOnly ) { // Clamp the width of the menu before grabbing its size this._ui.container.css( "max-width", rectangle.cx ); } menuSize = { cx: this._ui.container.outerWidth( true ), cy: this._ui.container.outerHeight( true ) }; return { rc: rectangle, menuSize: menuSize }; }, _calculateFinalLocation: function( desired, clampInfo ) { var returnValue, rectangle = clampInfo.rc, menuSize = clampInfo.menuSize; // Center the menu over the desired coordinates, while not going outside // the window tolerances. This will center wrt. 
the window if the popup is // too large. returnValue = { left: fitSegmentInsideSegment( rectangle.cx, menuSize.cx, rectangle.x, desired.x ), top: fitSegmentInsideSegment( rectangle.cy, menuSize.cy, rectangle.y, desired.y ) }; // Make sure the top of the menu is visible returnValue.top = Math.max( 0, returnValue.top ); // If the height of the menu is smaller than the height of the document // align the bottom with the bottom of the document returnValue.top -= Math.min( returnValue.top, Math.max( 0, returnValue.top + menuSize.cy - this.document.height() ) ); return returnValue; }, // Try and center the overlay over the given coordinates _placementCoords: function( desired ) { return this._calculateFinalLocation( desired, this._clampPopupWidth() ); }, _createPrerequisites: function( screenPrerequisite, containerPrerequisite, whenDone ) { var prerequisites, self = this; // It is important to maintain both the local variable prerequisites and // self._prerequisites. The local variable remains in the closure of the // functions which call the callbacks passed in. The comparison between the // local variable and self._prerequisites is necessary, because once a // function has been passed to .animationComplete() it will be called next // time an animation completes, even if that's not the animation whose end // the function was supposed to catch (for example, if an abort happens // during the opening animation, the .animationComplete handler is not // called for that animation anymore, but the handler remains attached, so // it is called the next time the popup is opened - making it stale. // Comparing the local variable prerequisites to the widget-level variable // self._prerequisites ensures that callbacks triggered by a stale // .animationComplete will be ignored. prerequisites = { screen: $.Deferred(), container: $.Deferred() }; prerequisites.screen.then( function() { if ( prerequisites === self._prerequisites ) { screenPrerequisite(); }<|fim▁hole|> if ( prerequisites === self._prerequisites ) { containerPrerequisite(); } }); $.when( prerequisites.screen, prerequisites.container ).done( function() { if ( prerequisites === self._prerequisites ) { self._prerequisites = null; whenDone(); } }); self._prerequisites = prerequisites; }, _animate: function( args ) { // NOTE before removing the default animation of the screen // this had an animate callback that would resolve the deferred // now the deferred is resolved immediately // TODO remove the dependency on the screen deferred this._ui.screen .removeClass( args.classToRemove ) .addClass( args.screenClassToAdd ); args.prerequisites.screen.resolve(); if ( args.transition && args.transition !== "none" ) { if ( args.applyTransition ) { this._applyTransition( args.transition ); } if ( this._fallbackTransition ) { this._ui.container .addClass( args.containerClassToAdd ) .removeClass( args.classToRemove ) .animationComplete( $.proxy( args.prerequisites.container, "resolve" ) ); return; } } this._ui.container.removeClass( args.classToRemove ); args.prerequisites.container.resolve(); }, // The desired coordinates passed in will be returned untouched if no reference element can be identified via // desiredPosition.positionTo. Nevertheless, this function ensures that its return value always contains valid // x and y coordinates by specifying the center middle of the window if the coordinates are absent. 
// options: { x: coordinate, y: coordinate, positionTo: string: "origin", "window", or jQuery selector _desiredCoords: function( openOptions ) { var offset, dst = null, windowCoordinates = getWindowCoordinates( this.window ), x = openOptions.x, y = openOptions.y, pTo = openOptions.positionTo; // Establish which element will serve as the reference if ( pTo && pTo !== "origin" ) { if ( pTo === "window" ) { x = windowCoordinates.cx / 2 + windowCoordinates.x; y = windowCoordinates.cy / 2 + windowCoordinates.y; } else { try { dst = $( pTo ); } catch( err ) { dst = null; } if ( dst ) { dst.filter( ":visible" ); if ( dst.length === 0 ) { dst = null; } } } } // If an element was found, center over it if ( dst ) { offset = dst.offset(); x = offset.left + dst.outerWidth() / 2; y = offset.top + dst.outerHeight() / 2; } // Make sure x and y are valid numbers - center over the window if ( $.type( x ) !== "number" || isNaN( x ) ) { x = windowCoordinates.cx / 2 + windowCoordinates.x; } if ( $.type( y ) !== "number" || isNaN( y ) ) { y = windowCoordinates.cy / 2 + windowCoordinates.y; } return { x: x, y: y }; }, _reposition: function( openOptions ) { // We only care about position-related parameters for repositioning openOptions = { x: openOptions.x, y: openOptions.y, positionTo: openOptions.positionTo }; this._trigger( "beforeposition", undefined, openOptions ); this._ui.container.offset( this._placementCoords( this._desiredCoords( openOptions ) ) ); }, reposition: function( openOptions ) { if ( this._isOpen ) { this._reposition( openOptions ); } }, _openPrerequisitesComplete: function() { var id = this.element.attr( "id" ); this._ui.container.addClass( "ui-popup-active" ); this._isOpen = true; this._resizeScreen(); this._ui.container.attr( "tabindex", "0" ).focus(); this._ignoreResizeEvents(); if ( id ) { this.document.find( "[aria-haspopup='true'][aria-owns='" + id + "']" ).attr( "aria-expanded", true ); } this._trigger( "afteropen" ); }, _open: function( options ) { var openOptions = $.extend( {}, this.options, options ), // TODO move blacklist to private method androidBlacklist = ( function() { var ua = navigator.userAgent, // Rendering engine is Webkit, and capture major version wkmatch = ua.match( /AppleWebKit\/([0-9\.]+)/ ), wkversion = !!wkmatch && wkmatch[ 1 ], androidmatch = ua.match( /Android (\d+(?:\.\d+))/ ), andversion = !!androidmatch && androidmatch[ 1 ], chromematch = ua.indexOf( "Chrome" ) > -1; // Platform is Android, WebKit version is greater than 534.13 ( Android 3.2.1 ) and not Chrome. if ( androidmatch !== null && andversion === "4.0" && wkversion && wkversion > 534.13 && !chromematch ) { return true; } return false; }()); // Count down to triggering "popupafteropen" - we have two prerequisites: // 1. The popup window animation completes (container()) // 2. 
The screen opacity animation completes (screen()) this._createPrerequisites( $.noop, $.noop, $.proxy( this, "_openPrerequisitesComplete" ) ); this._currentTransition = openOptions.transition; this._applyTransition( openOptions.transition ); this._ui.screen.removeClass( "ui-screen-hidden" ); this._ui.container.removeClass( "ui-popup-truncate" ); // Give applications a chance to modify the contents of the container before it appears this._reposition( openOptions ); this._ui.container.removeClass( "ui-popup-hidden" ); if ( this.options.overlayTheme && androidBlacklist ) { /* TODO: The native browser on Android 4.0.X ("Ice Cream Sandwich") suffers from an issue where the popup overlay appears to be z-indexed above the popup itself when certain other styles exist on the same page -- namely, any element set to `position: fixed` and certain types of input. These issues are reminiscent of previously uncovered bugs in older versions of Android's native browser: https://github.com/scottjehl/Device-Bugs/issues/3 This fix closes the following bugs ( I use "closes" with reluctance, and stress that this issue should be revisited as soon as possible ): https://github.com/jquery/jquery-mobile/issues/4816 https://github.com/jquery/jquery-mobile/issues/4844 https://github.com/jquery/jquery-mobile/issues/4874 */ // TODO sort out why this._page isn't working this.element.closest( ".ui-page" ).addClass( "ui-popup-open" ); } this._animate({ additionalCondition: true, transition: openOptions.transition, classToRemove: "", screenClassToAdd: "in", containerClassToAdd: "in", applyTransition: false, prerequisites: this._prerequisites }); }, _closePrerequisiteScreen: function() { this._ui.screen .removeClass( "out" ) .addClass( "ui-screen-hidden" ); }, _closePrerequisiteContainer: function() { this._ui.container .removeClass( "reverse out" ) .addClass( "ui-popup-hidden ui-popup-truncate" ) .removeAttr( "style" ); }, _closePrerequisitesDone: function() { var container = this._ui.container, id = this.element.attr( "id" ); container.removeAttr( "tabindex" ); // remove the global mutex for popups $.mobile.popup.active = undefined; // Blur elements inside the container, including the container $( ":focus", container[ 0 ] ).add( container[ 0 ] ).blur(); if ( id ) { this.document.find( "[aria-haspopup='true'][aria-owns='" + id + "']" ).attr( "aria-expanded", false ); } // alert users that the popup is closed this._trigger( "afterclose" ); }, _close: function( immediate ) { this._ui.container.removeClass( "ui-popup-active" ); this._page.removeClass( "ui-popup-open" ); this._isOpen = false; // Count down to triggering "popupafterclose" - we have two prerequisites: // 1. The popup window reverse animation completes (container()) // 2. The screen opacity animation completes (screen()) this._createPrerequisites( $.proxy( this, "_closePrerequisiteScreen" ), $.proxy( this, "_closePrerequisiteContainer" ), $.proxy( this, "_closePrerequisitesDone" ) ); this._animate( { additionalCondition: this._ui.screen.hasClass( "in" ), transition: ( immediate ? 
"none" : ( this._currentTransition ) ), classToRemove: "in", screenClassToAdd: "out", containerClassToAdd: "reverse out", applyTransition: true, prerequisites: this._prerequisites }); }, _unenhance: function() { if ( this.options.enhanced ) { return; } // Put the element back to where the placeholder was and remove the "ui-popup" class this._setOptions( { theme: $.mobile.popup.prototype.options.theme } ); this.element // Cannot directly insertAfter() - we need to detach() first, because // insertAfter() will do nothing if the payload div was not attached // to the DOM at the time the widget was created, and so the payload // will remain inside the container even after we call insertAfter(). // If that happens and we remove the container a few lines below, we // will cause an infinite recursion - #5244 .detach() .insertAfter( this._ui.placeholder ) .removeClass( "ui-popup ui-overlay-shadow ui-corner-all ui-body-inherit" ); this._ui.screen.remove(); this._ui.container.remove(); this._ui.placeholder.remove(); }, _destroy: function() { if ( $.mobile.popup.active === this ) { this.element.one( "popupafterclose", $.proxy( this, "_unenhance" ) ); this.close(); } else { this._unenhance(); } return this; }, _closePopup: function( theEvent, data ) { var parsedDst, toUrl, currentOptions = this.options, immediate = false; if ( ( theEvent && theEvent.isDefaultPrevented() ) || $.mobile.popup.active !== this ) { return; } // restore location on screen window.scrollTo( 0, this._scrollTop ); if ( theEvent && theEvent.type === "pagebeforechange" && data ) { // Determine whether we need to rapid-close the popup, or whether we can // take the time to run the closing transition if ( typeof data.toPage === "string" ) { parsedDst = data.toPage; } else { parsedDst = data.toPage.jqmData( "url" ); } parsedDst = $.mobile.path.parseUrl( parsedDst ); toUrl = parsedDst.pathname + parsedDst.search + parsedDst.hash; if ( this._myUrl !== $.mobile.path.makeUrlAbsolute( toUrl ) ) { // Going to a different page - close immediately immediate = true; } else { theEvent.preventDefault(); } } // remove nav bindings this.window.off( currentOptions.closeEvents ); // unbind click handlers added when history is disabled this.element.undelegate( currentOptions.closeLinkSelector, currentOptions.closeLinkEvents ); this._close( immediate ); }, // any navigation event after a popup is opened should close the popup // NOTE the pagebeforechange is bound to catch navigation events that don't // alter the url (eg, dialogs from popups) _bindContainerClose: function() { this.window .on( this.options.closeEvents, $.proxy( this, "_closePopup" ) ); }, widget: function() { return this._ui.container; }, // TODO no clear deliniation of what should be here and // what should be in _open. 
Seems to be "visual" vs "history" for now open: function( options ) { var url, hashkey, activePage, currentIsDialog, hasHash, urlHistory, self = this, currentOptions = this.options; // make sure open is idempotent if ( $.mobile.popup.active || currentOptions.disabled ) { return this; } // set the global popup mutex $.mobile.popup.active = this; this._scrollTop = this.window.scrollTop(); // if history alteration is disabled close on navigate events // and leave the url as is if ( !( currentOptions.history ) ) { self._open( options ); self._bindContainerClose(); // When histoy is disabled we have to grab the data-rel // back link clicks so we can close the popup instead of // relying on history to do it for us self.element .delegate( currentOptions.closeLinkSelector, currentOptions.closeLinkEvents, function( theEvent ) { self.close(); theEvent.preventDefault(); }); return this; } // cache some values for min/readability urlHistory = $.mobile.navigate.history; hashkey = $.mobile.dialogHashKey; activePage = $.mobile.activePage; currentIsDialog = ( activePage ? activePage.hasClass( "ui-dialog" ) : false ); this._myUrl = url = urlHistory.getActive().url; hasHash = ( url.indexOf( hashkey ) > -1 ) && !currentIsDialog && ( urlHistory.activeIndex > 0 ); if ( hasHash ) { self._open( options ); self._bindContainerClose(); return this; } // if the current url has no dialog hash key proceed as normal // otherwise, if the page is a dialog simply tack on the hash key if ( url.indexOf( hashkey ) === -1 && !currentIsDialog ) { url = url + (url.indexOf( "#" ) > -1 ? hashkey : "#" + hashkey); } else { url = $.mobile.path.parseLocation().hash + hashkey; } // Tack on an extra hashkey if this is the first page and we've just reconstructed the initial hash if ( urlHistory.activeIndex === 0 && url === urlHistory.initialDst ) { url += hashkey; } // swallow the the initial navigation event, and bind for the next this.window.one( "beforenavigate", function( theEvent ) { theEvent.preventDefault(); self._open( options ); self._bindContainerClose(); }); this.urlAltered = true; $.mobile.navigate( url, { role: "dialog" } ); return this; }, close: function() { // make sure close is idempotent if ( $.mobile.popup.active !== this ) { return this; } this._scrollTop = this.window.scrollTop(); if ( this.options.history && this.urlAltered ) { $.mobile.back(); this.urlAltered = false; } else { // simulate the nav bindings having fired this._closePopup(); } return this; } }); // TODO this can be moved inside the widget $.mobile.popup.handleLink = function( $link ) { var offset, path = $.mobile.path, // NOTE make sure to get only the hash from the href because ie7 (wp7) // returns the absolute href in this case ruining the element selection popup = $( path.hashToSelector( path.parseUrl( $link.attr( "href" ) ).hash ) ).first(); if ( popup.length > 0 && popup.data( "mobile-popup" ) ) { offset = $link.offset(); popup.popup( "open", { x: offset.left + $link.outerWidth() / 2, y: offset.top + $link.outerHeight() / 2, transition: $link.jqmData( "transition" ), positionTo: $link.jqmData( "position-to" ) }); } //remove after delay setTimeout( function() { $link.removeClass( $.mobile.activeBtnClass ); }, 300 ); }; // TODO move inside _create $.mobile.document.on( "pagebeforechange", function( theEvent, data ) { if ( data.options.role === "popup" ) { $.mobile.popup.handleLink( data.options.link ); theEvent.preventDefault(); } }); })( jQuery ); //>>excludeStart("jqmBuildExclude", pragmas.jqmBuildExclude); }); 
//>>excludeEnd("jqmBuildExclude");<|fim▁end|>
}); prerequisites.container.then( function() {
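// A hedged usage sketch (element id and option values are illustrative): the
// open() method above accepts the same position keys that _desiredCoords and
// _reposition consume.
//
// $( "#myPopup" ).popup( "open", { positionTo: "window", transition: "pop" } );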
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import sys import EulerPy try: from setuptools import setup except ImportError: from distutils.core import setup def readme(): with open('README.rst') as f: return f.read() def requirements(): install_requires = [] with open('requirements.txt') as f: for line in f: install_requires.append(line.strip()) # Terminal colors for Windows if 'win32' in str(sys.platform).lower(): install_requires.append('colorama>=0.2.4') return install_requires setup( name='EulerPy', version=EulerPy.__version__, description=EulerPy.__doc__.strip(), long_description=readme(), url='https://github.com/iKevinY/EulerPy', author=EulerPy.__author__, author_email='[email protected]', license=EulerPy.__license__, packages=['EulerPy'], entry_points={'console_scripts': ['euler = EulerPy.__main__:main']}, install_requires=requirements(), classifiers=[ "License :: OSI Approved :: MIT License", "Topic :: Utilities", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7",<|fim▁hole|> "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", ], keywords=['EulerPy', 'euler', 'project-euler', 'projecteuler'], include_package_data=True, zip_safe=False, )<|fim▁end|>
"Programming Language :: Python :: 3",
<|file_name|>urlwrappers.py<|end_file_name|><|fim▁begin|>import os
import functools
import logging

import six

from pelican.utils import (slugify, python_2_unicode_compatible)

logger = logging.getLogger(__name__)


@python_2_unicode_compatible
@functools.total_ordering
class URLWrapper(object):
    def __init__(self, name, settings):
        # next 2 lines are redundant with the setter of the name property
        # but are here for clarity
        self.settings = settings
        self._name = name
        self.slug = slugify(name, self.settings.get('SLUG_SUBSTITUTIONS', ()))

        self.name = name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name
        self.slug = slugify(name, self.settings.get('SLUG_SUBSTITUTIONS', ()))

    def as_dict(self):
        d = self.__dict__
        d['name'] = self.name
        return d

    def __hash__(self):
        return hash(self.slug)

    def _key(self):
        return self.slug

    def _normalize_key(self, key):
        subs = self.settings.get('SLUG_SUBSTITUTIONS', ())
        return six.text_type(slugify(key, subs))

    def __eq__(self, other):
        return self._key() == self._normalize_key(other)

    def __ne__(self, other):
        return self._key() != self._normalize_key(other)

    def __lt__(self, other):
        return self._key() < self._normalize_key(other)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<{} {}>'.format(type(self).__name__, str(self))

    def _from_settings(self, key, get_page_name=False):
        """Returns URL information as defined in settings.

        When get_page_name=True returns URL without anything after {slug}
        e.g. if in settings: CATEGORY_URL="cat/{slug}.html" this returns
        "cat/{slug}"

        Useful for pagination.

        """
        setting = "%s_%s" % (self.__class__.__name__.upper(), key)
        value = self.settings[setting]
        if not isinstance(value, six.string_types):
            logger.warning('%s is set to %s', setting, value)
            return value
        else:
            if get_page_name:
                return os.path.splitext(value)[0].format(**self.as_dict())
            else:
                return value.format(**self.as_dict())<|fim▁hole|>
    url = property(functools.partial(_from_settings, key='URL'))
    save_as = property(functools.partial(_from_settings, key='SAVE_AS'))


class Category(URLWrapper):
    pass


class Tag(URLWrapper):
    def __init__(self, name, *args, **kwargs):
        super(Tag, self).__init__(name.strip(), *args, **kwargs)


class Author(URLWrapper):
    pass<|fim▁end|>
page_name = property(functools.partial(_from_settings, key='URL', get_page_name=True))
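# A hedged usage sketch (the settings dict is a minimal stand-in for a real
# Pelican settings object):
#
# settings = {'CATEGORY_URL': 'category/{slug}.html',
#             'SLUG_SUBSTITUTIONS': ()}
# category = Category('Data Science', settings)
# category.slug       # -> 'data-science'
# category.url        # -> 'category/data-science.html'
# category.page_name  # -> 'category/data-science'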
<|file_name|>cssgroupingrule.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::CSSGroupingRuleBinding::CSSGroupingRuleMethods; use dom::bindings::error::{ErrorResult, Fallible}; use dom::bindings::inheritance::Castable; use dom::bindings::reflector::DomObject; use dom::bindings::root::{DomRoot, MutNullableDom}; use dom::bindings::str::DOMString; use dom::cssrule::CSSRule; use dom::cssrulelist::{CSSRuleList, RulesSource}; use dom::cssstylesheet::CSSStyleSheet; use dom_struct::dom_struct; use servo_arc::Arc; use style::shared_lock::{SharedRwLock, Locked}; use style::stylesheets::CssRules as StyleCssRules; #[dom_struct] pub struct CSSGroupingRule { cssrule: CSSRule, #[ignore_heap_size_of = "Arc"] rules: Arc<Locked<StyleCssRules>>, rulelist: MutNullableDom<CSSRuleList>, } impl CSSGroupingRule { pub fn new_inherited(parent_stylesheet: &CSSStyleSheet, rules: Arc<Locked<StyleCssRules>>) -> CSSGroupingRule { CSSGroupingRule { cssrule: CSSRule::new_inherited(parent_stylesheet), rules: rules, rulelist: MutNullableDom::new(None), } } fn rulelist(&self) -> DomRoot<CSSRuleList> { let parent_stylesheet = self.upcast::<CSSRule>().parent_stylesheet(); self.rulelist.or_init(|| CSSRuleList::new(self.global().as_window(), parent_stylesheet, RulesSource::Rules(self.rules.clone()))) } pub fn parent_stylesheet(&self) -> &CSSStyleSheet { self.cssrule.parent_stylesheet() } pub fn shared_lock(&self) -> &SharedRwLock { self.cssrule.shared_lock() } } impl CSSGroupingRuleMethods for CSSGroupingRule { // https://drafts.csswg.org/cssom/#dom-cssgroupingrule-cssrules fn CssRules(&self) -> DomRoot<CSSRuleList> { // XXXManishearth check origin clean flag self.rulelist() } // https://drafts.csswg.org/cssom/#dom-cssgroupingrule-insertrule<|fim▁hole|> } // https://drafts.csswg.org/cssom/#dom-cssgroupingrule-deleterule fn DeleteRule(&self, index: u32) -> ErrorResult { self.rulelist().remove_rule(index) } }<|fim▁end|>
fn InsertRule(&self, rule: DOMString, index: u32) -> Fallible<u32> { self.rulelist().insert_rule(&rule, index, /* nested */ true)
<|file_name|>auth.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from typing import Optional

from flask_login import LoginManager

from tamactiluya.models import User

login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'user.login'


@login_manager.user_loader
def user_loader(uname) -> Optional[User]:
    """
    :param uname:<|fim▁hole|>        return User(uname)
    except User.NotFound:
        return None<|fim▁end|>
:return: """ try:
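# A hedged wiring sketch (the app construction shown is illustrative): the
# login_manager defined above still needs to be bound to a Flask app.
#
# from flask import Flask
# app = Flask(__name__)
# login_manager.init_app(app)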
<|file_name|>sitemaps.py<|end_file_name|><|fim▁begin|>from django.contrib.sitemaps import Sitemap <|fim▁hole|> changefreq = "yearly" priority = 0.6 protocol = 'https' def items(self): return BlogEntry.on_site.filter(is_visible=True) def lastmod(self, item): return item.modification<|fim▁end|>
from .models import BlogEntry class BlogEntrySitemap(Sitemap):
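# A hedged wiring sketch (URL pattern is illustrative; assumes Django's
# bundled sitemap view): exposing BlogEntrySitemap from a urls.py module.
#
# from django.conf.urls import url
# from django.contrib.sitemaps.views import sitemap
# from .sitemaps import BlogEntrySitemap
#
# urlpatterns = [
#     url(r'^sitemap\.xml$', sitemap,
#         {'sitemaps': {'blog': BlogEntrySitemap}}),
# ]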
<|file_name|>trace.py<|end_file_name|><|fim▁begin|>import gevent import gevent.pool import uuid import logging def get_trace(greenlet=None): greenlet = greenlet or gevent.getcurrent() if not hasattr(greenlet, '_iris_trace'): greenlet._iris_trace = {} return greenlet._iris_trace def spawn(*args, **kwargs): greenlet = gevent.Greenlet(*args, **kwargs) greenlet._iris_trace = get_trace().copy() greenlet.start() return greenlet _spawn = spawn class Group(gevent.pool.Group): def spawn(self, *args, **kwargs): g = _spawn(*args, **kwargs) self.add(g)<|fim▁hole|>def trace(**kwargs): get_trace().update(kwargs) def set_id(trace_id=None): trace_id = trace_id or uuid.uuid4().hex trace(iris_trace_id=trace_id) return trace_id def get_id(): return get_trace().get('iris_trace_id') class TraceFormatter(logging.Formatter): def format(self, record): record.trace_id = get_id() return super(TraceFormatter, self).format(record)<|fim▁end|>
return g
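# A hedged usage sketch (the worker lambda is illustrative): a trace id
# stamped on the current greenlet via set_id() is copied into children
# created with spawn(), so TraceFormatter emits the same id for both.
#
# set_id()
# parent_id = get_id()
# child = spawn(lambda: print(get_id() == parent_id))  # prints True
# child.join()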
<|file_name|>pybugger.py<|end_file_name|><|fim▁begin|>import random
from pybugger import myaixterm<|fim▁hole|>color.aix_init()


def string_constructor(args, foreground="normal", background="normal"):
    if foreground != "rainbow":
        foreground = "" if foreground == "normal" else color.aix_fg(foreground)
        background = "" if background == "normal" else color.aix_bg(background)
        res = foreground + background

        for arg in args:
            res += arg

        res = res + color.aix_normal()
        return res
    else:
        colors = color.get_all_colors()
        res = ""

        for arg in args:
            res += arg

        rainbow_string = ""

        for character in list(res):
            foreground = color.aix_fg(colors[getRandomKey(colors)])
            background = color.aix_bg(colors[getRandomKey(colors)])
            rainbow_string += foreground + background + character

        rainbow_string += color.aix_normal()
        return rainbow_string


def getRandomKey(dictionary):
    return random.sample(list(dictionary), 1).pop()


def default(*args):
    """Format the arguments with a default foreground and background."""
    print(string_constructor(args))


def success(*args):
    """Format the arguments with a green foreground."""
    print(string_constructor(args, "green"))


def mega_success(*args):
    """Format the arguments with a white foreground and a green background."""
    print(string_constructor(args, "white", "green"))


def warning(*args):
    """Format the arguments with a yellow foreground."""
    print(string_constructor(args, "yellow"))


def mega_warning(*args):
    """Format the arguments with a black foreground and a yellow background."""
    print(string_constructor(args, "black", "fullyellow"))


def info(*args):
    """Format the arguments with a cyan foreground."""
    print(string_constructor(args, "cyan"))


def mega_info(*args):
    """Format the arguments with a white foreground and a cyan background."""
    print(string_constructor(args, "white", "cyan"))


def error(*args):
    """Format the arguments with a bright red foreground."""
    print(string_constructor(args, "brightred"))


def mega_error(*args):
    """Format the arguments with a white foreground and a red background."""
    print(string_constructor(args, "white", "red"))


def randomize(*args):
    """Format the arguments with a random foreground and background."""
    print(string_constructor(args, "rainbow"))


def inverted(*args):
    """Format the arguments with a black foreground and white background."""
    print(string_constructor(args, "black", "white"))


def custom(*args, delimiter='', fg="normal", bg="normal"):
    """Format the arguments with a custom foreground and background."""
    debug_str = delimiter.join(args)
    print(string_constructor(debug_str, fg, bg))


def test():
    """A test method to print out examples."""
    print("")
    print("pybugger.success(*lyric)")
    success("\"We're no strangers to love,")
    print("")
    print("pybugger.mega_success(*lyric)")
    mega_success("You know the rules and so do I")
    print("")
    print("pybugger.info(*lyric)")
    info("A full commitment's what I'm thinking of")
    print("")
    print("pybugger.mega_info(*lyric)")
    mega_info("You wouldn't get this from any other guy")
    print("")
    print("pybugger.warning(*lyric)")
    warning("I just wanna tell you how I'm feeling")
    print("")
    print("pybugger.mega_warning(*lyric)")
    mega_warning("Gotta make you understand,")
    print("")
    print("pybugger.error(*lyric)")
    error("Never gonna give you up")
    print("")
    print("pybugger.mega_error(*lyric)")
    mega_error("Never gonna let you down")
    print("")
    print("pybugger.randomize(*lyric)")
    randomize("Never gonna run around and desert you")
    print("")
    print("pybugger.custom(lyric, fg=\"color119\", bg=\"color93\")")
    custom("Never gonna make you cry", fg="color119", bg="color93")
    print("")
print("pybugger.inverted(*lyric)") inverted("Never gonna say goodbye.") print("") print("pybugger.default(*lyric)") default("Never gonna tell a lie and hurt you.\"") print("")<|fim▁end|>
color = myaixterm
<|file_name|>wct.conf.js<|end_file_name|><|fim▁begin|>module.exports = { 'plugins': {<|fim▁hole|> } } }<|fim▁end|>
'local': { 'browsers': [ 'chrome', 'firefox' ]
<|file_name|>LevelOrderJsonParser.java<|end_file_name|><|fim▁begin|>/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package michid.jsonjerk;

import michid.jsonjerk.JsonValue.JsonArray;
import michid.jsonjerk.JsonValue.JsonAtom;
import michid.jsonjerk.JsonValue.JsonObject;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Utility class for parsing JSON objects and arrays into {@link JsonObject}s
 * and {@link JsonArray}s, respectively. In contrast to {@link FullJsonParser},
 * this implementation resolves nested structures lazily. That is, it does a
 * level-order traversal of the JSON tree.
 * <p/>
 * The parser looks for 'hints' in the JSON text to speed up parsing: when it
 * encounters an integer value with the key ":size" in an object, that value<|fim▁hole|>
 */
public final class LevelOrderJsonParser {
    private LevelOrderJsonParser() { }

    /**
     * Parse a JSON object from {@code tokenizer}
     * @param tokenizer
     * @return a {@code JsonObject}
     * @throws ParseException
     */
    public static JsonObject parseObject(JsonTokenizer tokenizer) {
        ObjectHandler objectHandler = new ObjectHandler();
        new JsonParser(objectHandler).parseObject(tokenizer);
        return objectHandler.getObject();
    }

    /**
     * Parse a JSON array from {@code tokenizer}
     * @param tokenizer
     * @return a {@code JsonArray}
     * @throws ParseException
     */
    public static JsonArray parseArray(JsonTokenizer tokenizer) {
        ArrayHandler arrayHandler = new ArrayHandler();
        new JsonParser(arrayHandler).parseArray(tokenizer);
        return arrayHandler.getArray();
    }

    /**
     * This implementation of a {@code JsonHandler} builds up a {@code JsonObject}
     * from its constituents. Nested objects are not fully parsed though, but a
     * reference to the parser is kept which is only invoked when that nested object
     * is actually accessed.
     */
    public static class ObjectHandler extends JsonHandler {
        private final JsonObject object = new JsonObject(new LinkedHashMap<String, JsonValue>());

        @Override
        public void atom(Token key, Token value) {
            object.put(key.text(), new JsonAtom(value));
        }

        @Override
        public void object(JsonParser parser, Token key, JsonTokenizer tokenizer) {
            object.put(key.text(), new DeferredObjectValue(tokenizer.copy()));
            tokenizer.setPos(getNextPairPos(tokenizer.copy()));
        }

        @Override
        public void array(JsonParser parser, Token key, JsonTokenizer tokenizer) {
            object.put(key.text(), parseArray(tokenizer));
        }

        public JsonObject getObject() {
            return object;
        }
    }

    /**
     * This implementation of a {@code JsonHandler} builds up a {@code JsonArray}
     * from its constituents. Nested objects are not fully parsed though, but a
     * reference to the parser is kept which is only invoked when that nested object
     * is actually accessed.
*/ public static class ArrayHandler extends JsonHandler { private final JsonArray array = new JsonArray(new ArrayList<JsonValue>()); @Override public void atom(Token key, Token value) { array.add(new JsonAtom(value)); } @Override public void object(JsonParser parser, Token key, JsonTokenizer tokenizer) { array.add(new DeferredObjectValue(tokenizer.copy())); tokenizer.setPos(getNextPairPos(tokenizer.copy())); } @Override public void array(JsonParser parser, Token key, JsonTokenizer tokenizer) { array.add(parseArray(tokenizer)); } public JsonArray getArray() { return array; } } //------------------------------------------< private >--- private static class BreakException extends RuntimeException{ private static final BreakException BREAK = new BreakException(); } private static int getNextPairPos(JsonTokenizer tokenizer) { SkipObjectHandler skipObjectHandler = new SkipObjectHandler(tokenizer.pos()); try { new JsonParser(skipObjectHandler).parseObject(tokenizer); } catch (BreakException e) { return skipObjectHandler.newPos; } return tokenizer.pos(); } private static class DeferredObjectValue extends JsonObject { private final JsonTokenizer tokenizer; public DeferredObjectValue(JsonTokenizer tokenizer) { super(null); this.tokenizer = tokenizer; } @Override public void put(String key, JsonValue value) { throw new IllegalStateException("Cannot add value"); } @Override public JsonValue get(String key) { return value().get(key); } @Override public Map<String, JsonValue> value() { return parseObject(tokenizer.copy()).value(); } @Override public String toString() { return "<deferred>"; } } private static class SkipObjectHandler extends JsonHandler { private final int startPos; private int newPos; public SkipObjectHandler(int startPos) { this.startPos = startPos; } @Override public void atom(Token key, Token value) { if (key != null && ":size".equals(key.text()) && Token.Type.NUMBER == value.type()) { newPos = startPos + Integer.parseInt(value.text()); throw BreakException.BREAK; } } } }<|fim▁end|>
* is used for the size of the entire object (including sub-objects). * * @see FullJsonParser
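The Javadoc above hinges on two ideas: a nested object is stored as a deferred value that is parsed only on first access, and the ":size" hint lets the parser jump past a nested object instead of walking it. A minimal Python sketch of the deferred half, with the standard json module standing in for the JsonTokenizer (the names here are illustrative, not part of the Java API):

import json

class DeferredObject:
    """Hold raw JSON text and parse it only on first access,
    mirroring DeferredObjectValue in the Java code above."""

    def __init__(self, raw_text):
        self._raw = raw_text
        self._parsed = None  # cache; filled on the first value() call

    def value(self):
        if self._parsed is None:
            self._parsed = json.loads(self._raw)  # lazy, one-time parse
        return self._parsed

    def get(self, key):
        return self.value().get(key)

# Nothing is parsed until the nested object is actually accessed.
nested = DeferredObject('{"a": 1, "b": {"c": 2}}')
print(nested.get("b"))  # parsing happens here; prints {'c': 2}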
<|file_name|>model.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # @package model # @author Attila Borcs # # Class for the deep neural net. Each class function is wrapped with # a decorator function using python @property for unifying # the DNN functionalities when the tensorflow graph initializer # is called (tf.global_variables_initializer()) import functools import tensorflow as tf import matplotlib as mpl mpl.use('TkAgg') import matplotlib.pyplot as plt import numpy as np import params as prm import tensorflow.contrib.slim as slim from tensorflow.examples.tutorials.mnist import input_data def doublewrap(function): """ A decorator decorator, allowing the decorator to be used without parentheses if no arguments are provided. All arguments must be optional. credits: https://danijar.github.io/structuring-your-tensorflow-models """ @functools.wraps(function) def decorator(*args, **kwargs): if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return function(args[0]) else: return lambda wrapee: function(wrapee, *args, **kwargs) return decorator @doublewrap def define_scope(function, scope=None, *args, **kwargs): """ A decorator for functions that define TensorFlow operations. The wrapped function will only be executed once. Subsequent calls to it will directly return the result so that operations are added to the graph only once. The operations added by the function live within a tf.variable_scope(). If this decorator is used with arguments, they will be forwarded to the variable scope. The scope name defaults to the name of the wrapped function. credits: https://danijar.github.io/structuring-your-tensorflow-models """ attribute = '_cache_' + function.__name__ name = scope or function.__name__ @property @functools.wraps(function) def decorator(self): if not hasattr(self, attribute): with tf.variable_scope(name, *args, **kwargs): setattr(self, attribute, function(self)) return getattr(self, attribute) <|fim▁hole|>class Model: """ This class is responsible for building and wrapping all of the functionalities of the tensor graph. Attributes of prediction, optimization and loss function will be stored under tensorflow variable scope. """ def __init__(self, image, label): self.image = image self.label = label self.prediction self.optimize self.error self.hidden_1 self.hidden_2 self.hidden_3 @define_scope(initializer=slim.xavier_initializer()) def prediction(self): x = self.image x_image = tf.reshape(x, [-1, prm.mnist_img_size, prm.mnist_img_size, 1]) self.hidden_1 = slim.conv2d(x_image, 5, [prm.conv_size, prm.conv_size]) pool_1 = slim.max_pool2d(self.hidden_1, [prm.max_pool_size, prm.max_pool_size]) self.hidden_2 = slim.conv2d(pool_1, 5, [prm.conv_size, prm.conv_size]) pool_2 = slim.max_pool2d(self.hidden_2, [prm.max_pool_size, prm.max_pool_size]) hidden_3 = slim.conv2d(pool_2, 20, [prm.conv_size, prm.conv_size]) self.hidden_3 = slim.dropout(hidden_3, 1.0) x = slim.fully_connected( slim.flatten(self.hidden_3), 10, activation_fn=tf.nn.softmax) return x @define_scope def optimize(self): logprob = tf.log(self.prediction + 1e-12) cross_entropy = -tf.reduce_sum(self.label * logprob) optimizer = tf.train.AdamOptimizer(1e-4) return optimizer.minimize(cross_entropy) @define_scope def error(self): mistakes = tf.not_equal( tf.argmax(self.label, 1), tf.argmax(self.prediction, 1)) return tf.reduce_mean(tf.cast(mistakes, tf.float32))<|fim▁end|>
return decorator
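Stripped of TensorFlow, define_scope is the memoizing-property pattern: compute on first access, cache the result on the instance, and return the cached value afterwards. A plain-Python sketch of just that core (the tf.variable_scope handling is deliberately omitted; this is not the file's actual code):

import functools

def lazy_property(function):
    # Same caching trick as define_scope above, minus the graph scoping.
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return wrapper

class Example:
    @lazy_property
    def expensive(self):
        print('computed once')
        return 42

e = Example()
e.expensive  # prints 'computed once' and caches 42
e.expensive  # served from the cache; nothing is printed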
<|file_name|>my.js<|end_file_name|><|fim▁begin|>/* this is all example code which should be changed; see query.js for how it works */ authUrl = "http://importio-signedserver.herokuapp.com/"; reEx.push(/\/_source$/); /*<|fim▁hole|> doQuery();//query on ready } */ //change doReady() to add autocomplete-related events // http://jqueryui.com/autocomplete/ http://api.jqueryui.com/autocomplete/ var acField;//autocomplete data field var acSel;//autocomplete input selector var acsSel = "#autocomplete-spin";//autocomplete spinner selector var cache = {};//autocomplete cache var termCur = "";//autocomplete current term var doReadyOrg = doReady; doReady = function() { doReadyOrg(); $(acSel) .focus() .bind("keydown", function(event) { // http://api.jqueryui.com/jQuery.ui.keyCode/ switch(event.keyCode) { //don't fire autocomplete on certain keys case $.ui.keyCode.LEFT: case $.ui.keyCode.RIGHT: event.stopImmediatePropagation(); return true; break; //submit form on enter case $.ui.keyCode.ENTER: doQuery(); $(this).autocomplete("close"); break; } }) .autocomplete({ minLength: 3, source: function(request, response) { var term = request.term.replace(/[^\w\s]/gi, '').trim().toUpperCase();//replace all but "words" [A-Za-z0-9_] & whitespaces if (term in cache) { doneCompleteCallbackStop(); response(cache[term]); return; } termCur = term; if (spinOpts) { $(acsSel).spin(spinOpts); } cache[term] = []; doComplete(term); response(cache[term]);//send empty for now } }); }; function doComplete(term) { doQueryMy(); var qObjComplete = jQuery.extend({}, qObj);//copy to new obj qObjComplete.maxPages = 1; importio.query(qObjComplete, { "data": function(data) { dataCompleteCallback(data, term); }, "done": function(data) { doneCompleteCallback(data, term); } } ); } var dataCompleteCallback = function(data, term) { console.log("Data received", data); for (var i = 0; i < data.length; i++) { var d = data[i]; var c = d.data[acField]; if (typeof filterComplete === 'function') { c = filterComplete(c); } c = c.trim(); if (!c) { continue; } cache[term].push(c); } } var doneCompleteCallback = function(data, term) { console.log("Done, all data:", data); console.log("cache:", cache); // http://stackoverflow.com/questions/16747798/delete-duplicate-elements-from-an-array cache[term] = cache[term].filter( function(elem, index, self) { return index == self.indexOf(elem); }); if (termCur != term) { return; } doneCompleteCallbackStop(); $(acSel).trigger("keydown"); } var doneCompleteCallbackStop = function() { termCur = ""; if (spinOpts) { $(acsSel).spin(false); } } /* Query for tile Store Locators */ fFields.push({id: "postcode", html: '<input id="postcode" type="text" value="EC2M 4TP" />'}); fFields.push({id: "autocomplete-spin", html: '<span id="autocomplete-spin"></span>'}); fFields.push({id: "submit", html: '<button id="submit" onclick="doQuery();">Query</button>'}); acField = "address"; var filterComplete = function(val) { if (val.indexOf(", ") == -1) { return ""; } return val.split(", ").pop(); } acSel = "#postcode"; qObj.connectorGuids = [ "8f628f9d-b564-4888-bc99-1fb54b2df7df", "7290b98f-5bc0-4055-a5df-d7639382c9c3", "14d71ff7-b58f-4b37-bb5b-e2475bdb6eb9", "9c99f396-2b8c-41e0-9799-38b039fe19cc", "a0087993-5673-4d62-a5ae-62c67c1bcc40" ]; var doQueryMy = function() { qObj.input = { "postcode": $("#postcode").val() }; } /* Here's some other example code for a completely different API fFields.push({id: "title", html: '<input id="title" type="text" value="harry potter" />'}); fFields.push({id: "autocomplete-spin", html: '<span 
id="autocomplete-spin"></span>'}); fFields.push({id: "submit", html: '<button id="submit" onclick="doQuery();">Query</button>'}); acField = "title"; acSel = "#title"; filters["image"] = function(val, row) { return '<a href="' + val + '" target="_blank">' + val + '</a>'; } qObj.connectorGuids = [ "ABC" ]; var doQueryMy = function() { qObj.input = { "search": $("#title").val() }; } */ /* Here's some other example code for a completely different API colNames = ["ranking", "title", "artist", "album", "peak_pos", "last_pos", "weeks", "image", "spotify", "rdio", "video"]; filters["title"] = function(val, row) { return "<b>" + val + "</b>"; } filters["video"] = function(val, row) { if (val.substring(0, 7) != "http://") { return val; } return '<a href="' + val + '" target="_blank">' + val + '</a>'; } doQuery = function() { doQueryPre(); for (var page = 0; page < 10; page++) { importio.query({ "connectorGuids": [ "XYZ" ], "input": { "webpage/url": "http://www.billboard.com/charts/hot-100?page=" + page } }, { "data": dataCallback, "done": doneCallback }); } } */<|fim▁end|>
//change doReady() to auto-query on document ready var doReadyOrg = doReady; doReady = function() { doReadyOrg();
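The autocomplete wiring above boils down to three small techniques: normalize the term, serve repeat lookups from a per-term cache, and deduplicate results while keeping first-seen order (the filter/indexOf idiom in doneCompleteCallback). A rough Python equivalent of that flow; fetch_suggestions is a hypothetical stand-in for the import.io query and is not part of the original code:

import re

cache = {}

def normalize(term):
    # Mirror the JS: strip non-word characters, trim, upper-case.
    return re.sub(r'[^\w\s]', '', term).strip().upper()

def complete(term, fetch_suggestions):
    term = normalize(term)
    if len(term) < 3:      # minLength: 3 in the jQuery UI widget
        return []
    if term in cache:      # repeat lookups never hit the network
        return cache[term]
    results = fetch_suggestions(term)
    seen = set()           # deduplicate, preserving first occurrence
    unique = [r for r in results if not (r in seen or seen.add(r))]
    cache[term] = unique
    return unique

print(complete('ec2m!', lambda t: ['EC2M 4TP', 'EC2M 4TP', 'EC2M 7PP']))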
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod guarded_queue; mod sandbox_tests_helper; mod supervisor; mod tests; mod timestamping; use bit_vec::BitVec; use exonum::{ blockchain::{ config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams}, AdditionalHeaders, Block, BlockParams, BlockProof, Blockchain, BlockchainBuilder, BlockchainMut, ConsensusConfig, Epoch, PersistentPool, ProposerId, Schema, SkipFlag, TransactionCache, ValidatorKeys, }, crypto::{Hash, KeyPair, PublicKey, SecretKey, Seed, SEED_LENGTH}, helpers::{user_agent, Height, Round, ValidatorId}, keys::Keys, merkledb::{BinaryValue, HashTag, MapProof, ObjectHash, Snapshot, SystemSchema, TemporaryDB}, messages::{AnyTx, Precommit, SignedMessage, Verified}, runtime::{ArtifactId, SnapshotExt}, }; use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory}; use futures::{channel::mpsc, prelude::*}; use std::{ cell::{Ref, RefCell, RefMut}, collections::{BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque}, convert::TryFrom, fmt::Debug, iter::FromIterator, net::{IpAddr, Ipv4Addr, SocketAddr}, ops, sync::{Arc, Mutex}, time::{Duration, SystemTime, UNIX_EPOCH}, }; use self::{ guarded_queue::GuardedQueue, sandbox_tests_helper::{BlockBuilder, PROPOSE_TIMEOUT}, supervisor::SupervisorService, timestamping::TimestampingService, }; use crate::{ connect_list::ConnectList, events::{ Event, EventHandler, InternalEvent, InternalRequest, NetworkEvent, NetworkRequest, SyncSender, TimeoutRequest, }, messages::{ BlockRequest, BlockResponse, Connect, ExonumMessage, Message, PeersRequest, PoolTransactionsRequest, Prevote, PrevotesRequest, Propose, ProposeRequest, Status, TransactionsRequest, TransactionsResponse, }, pool::{ManagePool, StandardPoolManager}, state::State, ApiSender, Configuration, ConnectInfo, ConnectListConfig, ExternalMessage, MemoryPoolConfig, NetworkConfiguration, NodeHandler, NodeSender, SharedNodeState, SystemStateProvider, }; pub type SharedTime = Arc<Mutex<SystemTime>>; pub type Milliseconds = u64; const INITIAL_TIME_IN_SECS: u64 = 1_486_720_340; #[derive(Debug)] pub struct SandboxSystemStateProvider { listen_address: SocketAddr, shared_time: SharedTime, } impl SystemStateProvider for SandboxSystemStateProvider { fn current_time(&self) -> SystemTime { *self.shared_time.lock().unwrap() } fn listen_address(&self) -> SocketAddr { self.listen_address } } #[derive(Debug)] struct SandboxInner { pub time: SharedTime, pub handler: NodeHandler, pub sent: GuardedQueue, pub _events: VecDeque<Event>, pub timers: BinaryHeap<TimeoutRequest>, pub network_requests_rx: mpsc::Receiver<NetworkRequest>, pub internal_requests_rx: mpsc::Receiver<InternalRequest>, pub api_requests_rx: mpsc::Receiver<ExternalMessage>, pub transactions_rx: mpsc::Receiver<Verified<AnyTx>>, } impl SandboxInner { pub fn process_events(&mut self) { self.process_internal_requests(); self.process_api_requests(); self.process_network_requests(); 
self.process_internal_requests(); } pub fn handle_event<E: Into<Event>>(&mut self, e: E) { self.handler.handle_event(e.into()); self.process_events(); } fn next_event<T>(rx: &mut mpsc::Receiver<T>) -> Option<T> { rx.next().now_or_never().flatten() } fn process_network_requests(&mut self) { while let Some(network) = Self::next_event(&mut self.network_requests_rx) { match network { NetworkRequest::SendMessage(peer, msg) => { let msg = Message::from_signed(msg).expect("Expected valid message."); self.sent.push_back((peer, msg)); } NetworkRequest::DisconnectWithPeer(_) => {} } } } fn process_internal_requests(&mut self) { while let Some(internal) = Self::next_event(&mut self.internal_requests_rx) { match internal { InternalRequest::Timeout(t) => self.timers.push(t), InternalRequest::JumpToRound(height, round) => { self.handler .handle_event(InternalEvent::jump_to_round(height, round).into()); } InternalRequest::VerifyMessage(raw) => { let msg = SignedMessage::from_bytes(raw.into()) .and_then(SignedMessage::into_verified::<ExonumMessage>) .map(Message::from) .unwrap(); self.handler .handle_event(InternalEvent::message_verified(msg).into()); } } } } fn process_api_requests(&mut self) { while let Some(api) = Self::next_event(&mut self.api_requests_rx) { self.handler.handle_event(api.into()); } while let Some(tx) = Self::next_event(&mut self.transactions_rx) { self.handler.handle_event(tx.into()); } } } #[derive(Debug)] pub struct Sandbox { pub validators_map: HashMap<PublicKey, SecretKey>, pub services_map: HashMap<PublicKey, SecretKey>, pub api_sender: ApiSender, inner: RefCell<SandboxInner>, addresses: Vec<ConnectInfo>, /// Connect message used during initialization. connect: Option<Verified<Connect>>, } impl Sandbox { pub fn initialize( &mut self, connect_message_time: SystemTime, start_index: usize, end_index: usize, ) { let connect = Self::create_connect( &self.public_key(ValidatorId(0)), self.address(ValidatorId(0)), connect_message_time.into(), &user_agent(), self.secret_key(ValidatorId(0)), ); for validator in start_index..end_index { let validator = ValidatorId(validator as u16); self.recv(&Self::create_connect( &self.public_key(validator), self.address(validator), self.time().into(), &user_agent(), self.secret_key(validator), )); self.send(self.public_key(validator), &connect); } self.check_unexpected_message(); self.connect = Some(connect); } fn check_unexpected_message(&self) { if let Some((addr, msg)) = self.pop_sent_message() { panic!("Sent unexpected message {:?} to {}", msg, addr); } } pub fn public_key(&self, id: ValidatorId) -> PublicKey { self.validators()[id.0 as usize] } pub fn secret_key(&self, id: ValidatorId) -> &SecretKey { let p = self.public_key(id); &self.validators_map[&p] } pub fn address(&self, id: ValidatorId) -> String { let id: usize = id.into(); self.addresses[id].address.clone() } /// Creates a `BlockRequest` message signed by this validator. pub fn create_block_request( author: PublicKey, to: PublicKey, height: Height, secret_key: &SecretKey, ) -> Verified<BlockRequest> { Verified::from_value(BlockRequest::new(to, height), author, secret_key) } /// Creates a `BlockRequest` message signed by this validator. pub fn create_full_block_request( author: PublicKey, to: PublicKey, block_height: Height, epoch: Height, secret_key: &SecretKey, ) -> Verified<BlockRequest> { let request = BlockRequest::with_epoch(to, block_height, epoch); Verified::from_value(request, author, secret_key) } /// Creates a `Status` message signed by this validator. 
pub fn create_status( author: PublicKey, epoch: Height, last_hash: Hash, pool_size: u64, secret_key: &SecretKey, ) -> Verified<Status> { Verified::from_value( Status::new(epoch, epoch, last_hash, pool_size), author, secret_key, ) } /// Creates signed `Status` with the next height from the specified validator. pub fn create_status_with_custom_epoch( &self, from: ValidatorId, blockchain_height: Height, epoch: Height, ) -> Verified<Status> { assert!(blockchain_height <= epoch); let last_hash = self.last_hash(); Verified::from_value( Status::new(epoch, blockchain_height, last_hash, 0), self.public_key(from), self.secret_key(from), ) } pub fn create_our_status( &self, epoch: Height, blockchain_height: Height, pool_size: u64, ) -> Verified<Status> { let last_hash = self.last_hash(); Verified::from_value( Status::new(epoch, blockchain_height, last_hash, pool_size), self.public_key(ValidatorId(0)), self.secret_key(ValidatorId(0)), ) } /// Creates a `BlockResponse` message signed by this validator. pub fn create_block_response( public_key: PublicKey, to: PublicKey, block: Block, precommits: impl IntoIterator<Item = Verified<Precommit>>, tx_hashes: impl IntoIterator<Item = Hash>, secret_key: &SecretKey, ) -> Verified<BlockResponse> { Verified::from_value( BlockResponse::new( to, block, precommits.into_iter().map(Verified::into_bytes), tx_hashes, ), public_key, secret_key, ) } /// Creates a `Connect` message signed by this validator. pub fn create_connect( public_key: &PublicKey, addr: String, time: chrono::DateTime<chrono::Utc>, user_agent: &str, secret_key: &SecretKey, ) -> Verified<Connect> { Verified::from_value( Connect::new(addr, time, user_agent), *public_key, secret_key, ) } /// Creates a `PeersRequest` message signed by this validator. pub fn create_peers_request( public_key: PublicKey, to: PublicKey, secret_key: &SecretKey, ) -> Verified<PeersRequest> { Verified::from_value(PeersRequest::new(to), public_key, secret_key) } /// Creates a `PoolTransactionsRequest` message signed by this validator. pub fn create_pool_transactions_request( public_key: PublicKey, to: PublicKey, secret_key: &SecretKey, ) -> Verified<PoolTransactionsRequest> { Verified::from_value(PoolTransactionsRequest::new(to), public_key, secret_key) } /// Creates a `Propose` message signed by the specified validator. pub fn create_propose( &self, validator_id: ValidatorId, epoch: Height, round: Round, last_hash: Hash, tx_hashes: impl IntoIterator<Item = Hash>, secret_key: &SecretKey, ) -> Verified<Propose> { Verified::from_value( Propose::new(validator_id, epoch, round, last_hash, tx_hashes), self.public_key(validator_id), secret_key, ) } /// Creates a `Propose` message to skip a block signed by the specified validator. pub fn create_skip_propose( &self, validator_id: ValidatorId, epoch: Height, round: Round, last_hash: Hash, secret_key: &SecretKey, ) -> Verified<Propose> { Verified::from_value( Propose::skip(validator_id, epoch, round, last_hash), self.public_key(validator_id), secret_key, ) } /// Creates a `Precommit` message signed by this validator. 
#[allow(clippy::too_many_arguments)] pub fn create_precommit( &self, validator_id: ValidatorId, epoch: Height, propose_round: Round, propose_hash: Hash, block_hash: Hash, system_time: chrono::DateTime<chrono::Utc>, secret_key: &SecretKey, ) -> Verified<Precommit> { Verified::from_value( Precommit::new( validator_id, epoch, propose_round, propose_hash, block_hash, system_time, ), self.public_key(validator_id), secret_key, ) } /// Creates a `Precommit` message signed by this validator. pub fn create_prevote( &self, validator_id: ValidatorId, epoch: Height, propose_round: Round, propose_hash: Hash, locked_round: Round, secret_key: &SecretKey, ) -> Verified<Prevote> { Verified::from_value( Prevote::new( validator_id, epoch, propose_round, propose_hash, locked_round, ), self.public_key(validator_id), secret_key, ) } /// Creates a `PrevoteRequest` message signed by this validator. #[allow(clippy::too_many_arguments)] pub fn create_prevote_request( from: PublicKey, to: PublicKey, epoch: Height, round: Round, propose_hash: Hash, validators: BitVec, secret_key: &SecretKey, ) -> Verified<PrevotesRequest> { Verified::from_value( PrevotesRequest::new(to, epoch, round, propose_hash, validators), from, secret_key, ) } /// Creates a `ProposeRequest` message signed by this validator. pub fn create_propose_request( author: PublicKey, to: PublicKey, epoch: Height, propose_hash: Hash, secret_key: &SecretKey, ) -> Verified<ProposeRequest> { Verified::from_value( ProposeRequest::new(to, epoch, propose_hash), author, secret_key, ) } /// Creates a `TransactionsRequest` message signed by this validator. pub fn create_transactions_request( author: PublicKey, to: PublicKey, txs: impl IntoIterator<Item = Hash>, secret_key: &SecretKey, ) -> Verified<TransactionsRequest> { Verified::from_value(TransactionsRequest::new(to, txs), author, secret_key) } /// Creates a `TransactionsResponse` message signed by this validator. pub fn create_transactions_response( author: PublicKey, to: PublicKey, txs: impl IntoIterator<Item = Verified<AnyTx>>, secret_key: &SecretKey, ) -> Verified<TransactionsResponse> { Verified::from_value( TransactionsResponse::new(to, txs.into_iter().map(Verified::into_bytes)), author, secret_key, ) } pub fn validators(&self) -> Vec<PublicKey> { self.cfg() .validator_keys .iter() .map(|x| x.consensus_key) .collect() } #[allow(clippy::let_and_return)] pub fn time(&self) -> SystemTime { let inner = self.inner.borrow(); let time = *inner.time.lock().unwrap(); time } pub(crate) fn node_state(&self) -> Ref<'_, State> { Ref::map(self.inner.borrow(), |inner| inner.handler.state()) } pub fn blockchain(&self) -> Blockchain { self.inner.borrow().handler.blockchain.as_ref().clone() } pub fn blockchain_mut(&self) -> impl ops::DerefMut<Target = BlockchainMut> + '_ { RefMut::map(self.inner.borrow_mut(), |inner| { &mut inner.handler.blockchain }) } /// Returns connect message used during initialization. 
pub fn connect(&self) -> Option<&Verified<Connect>> { self.connect.as_ref() } pub fn recv<T: TryFrom<SignedMessage>>(&self, msg: &Verified<T>) { self.check_unexpected_message(); let event = NetworkEvent::MessageReceived(msg.as_raw().to_bytes()); self.inner.borrow_mut().handle_event(event); } pub fn process_events(&self) { self.inner.borrow_mut().process_events(); } pub fn pop_sent_message(&self) -> Option<(PublicKey, Message)> { self.inner.borrow_mut().sent.pop_front() } pub fn send<T>(&self, key: PublicKey, expected_msg: &Verified<T>) where T: TryFrom<SignedMessage> + Debug, { self.process_events(); if let Some((real_addr, real_msg)) = self.pop_sent_message() { assert_eq!( expected_msg.as_raw(), real_msg.as_raw(), "Expected to send other message" ); assert_eq!( key, real_addr, "Expected to send message to other recipient" ); } else { panic!( "Expected to send the message {:?} to {} but nothing happened", expected_msg, key ); } } pub fn send_peers_request(&self) { self.process_events(); let (addr, msg) = self .pop_sent_message() .expect("Expected to send the PeersRequest message but nothing happened"); let peers_request = Verified::<PeersRequest>::try_from(msg) .expect("Incorrect message. PeersRequest was expected"); let id = self .addresses .iter() .position(|connect_info| connect_info.public_key == addr) .unwrap_or_else(|| { panic!("Sending PeersRequest to unknown peer {:?}", addr); }); assert_eq!( self.public_key(ValidatorId(id as u16)), peers_request.payload().to ); } pub fn broadcast<T>(&self, msg: &Verified<T>) where T: TryFrom<SignedMessage> + Debug, { self.broadcast_to_addrs(msg, self.addresses.iter().map(|i| &i.public_key).skip(1)); } pub fn try_broadcast<T>(&self, msg: &Verified<T>) -> Result<(), String> where T: TryFrom<SignedMessage> + Debug, { self.try_broadcast_to_addrs(msg, self.addresses.iter().map(|i| &i.public_key).skip(1)) } pub fn broadcast_to_addrs<'a, T, I>(&self, msg: &Verified<T>, addresses: I) where T: TryFrom<SignedMessage> + Debug, I: IntoIterator<Item = &'a PublicKey>, { self.try_broadcast_to_addrs(msg, addresses).unwrap(); } #[allow(clippy::unnecessary_wraps)] pub fn try_broadcast_to_addrs<'a, T, I>( &self, msg: &Verified<T>, addresses: I, ) -> Result<(), String> where T: TryFrom<SignedMessage> + Debug, I: IntoIterator<Item = &'a PublicKey>, { let expected_msg = Message::from_signed(msg.as_raw().clone()) .expect("Can't obtain `Message` from `Verified`"); // If node is excluded from validators, then it still will broadcast messages. // So in that case we should not skip addresses and validators count. let mut expected_set: HashSet<_> = HashSet::from_iter(addresses); for _ in 0..expected_set.len() { if let Some((real_addr, real_msg)) = self.pop_sent_message() { assert_eq!( expected_msg, real_msg, "Expected to broadcast other message", ); if expected_set.contains(&real_addr) { expected_set.remove(&real_addr); } else { panic!( "Double send the same message {:?} to {:?} during broadcasting", msg, real_addr ); } } else { panic!( "Expected to broadcast the message {:?} but someone don't receive \ messages: {:?}", msg, expected_set ); } } Ok(()) } pub fn check_broadcast_status(&self, height: Height, block_hash: Hash) { self.broadcast(&Self::create_status( self.node_public_key(), height, block_hash, 0, &self.node_secret_key(), )); } pub fn add_time(&self, duration: Duration) { self.check_unexpected_message(); let now = { let inner = self.inner.borrow_mut(); let mut time = inner.time.lock().unwrap(); *time += duration; *time }; // Handle timeouts. 
loop { let timeout = { let timers = &mut self.inner.borrow_mut().timers; if let Some(request) = timers.pop() { if request.time() > now { timers.push(request); break; } request.event() } else { break; } }; self.inner.borrow_mut().handle_event(timeout); } } pub fn is_leader(&self) -> bool { self.node_state().is_leader() } pub fn leader(&self, round: Round) -> ValidatorId { self.node_state().leader(round) } pub fn last_block(&self) -> Block { self.blockchain().last_block() } pub fn last_hash(&self) -> Hash { self.blockchain().last_hash() } pub fn last_state_hash(&self) -> Hash { self.last_block().state_hash } pub fn filter_present_transactions<'a, I>(&self, txs: I) -> Vec<Verified<AnyTx>> where I: IntoIterator<Item = &'a Verified<AnyTx>>, { let mut unique_set: HashSet<Hash> = HashSet::new(); let snapshot = self.blockchain().snapshot(); let node_state = self.node_state(); let tx_cache = PersistentPool::new(&snapshot, node_state.tx_cache()); txs.into_iter() .filter(|transaction| { let tx_hash = transaction.object_hash(); if unique_set.contains(&tx_hash) { return false; } unique_set.insert(tx_hash); !tx_cache.contains_transaction(tx_hash) }) .cloned() .collect() } /// Extracts `state_hash` and `error_hash` from the fake block. /// /// **NB.** This method does not correctly process transactions that mutate the `Dispatcher`, /// e.g., starting new services. pub fn compute_block_hashes(&self, txs: &[Verified<AnyTx>]) -> (Hash, Hash) { let mut blockchain = self.blockchain_mut(); let mut hashes = vec![]; let mut recover = BTreeSet::new(); let fork = blockchain.fork(); let mut schema = Schema::new(&fork); for raw in txs { let hash = raw.object_hash(); hashes.push(hash); if schema.transactions().get(&hash).is_none() { recover.insert(hash); schema.add_transaction_into_pool(raw.clone()); } } blockchain.merge(fork.into_patch()).unwrap(); let block_data = BlockParams::new(ValidatorId(0), Height(0), &hashes); let patch = blockchain.create_patch(block_data, &()); let fork = blockchain.fork(); let mut schema = Schema::new(&fork); for hash in recover { assert!(schema.reject_transaction(hash)); } blockchain.merge(fork.into_patch()).unwrap(); let block = (patch.as_ref() as &dyn Snapshot).for_core().last_block(); (block.state_hash, block.error_hash) } pub fn create_block(&self, txs: &[Verified<AnyTx>]) -> Block { let tx_hashes: Vec<_> = txs.iter().map(ObjectHash::object_hash).collect(); let (state_hash, error_hash) = self.compute_block_hashes(txs); BlockBuilder::new(self) .with_txs_hashes(&tx_hashes) .with_state_hash(&state_hash) .with_error_hash(&error_hash) .build() } pub fn create_block_skip(&self) -> Block { let mut block = Block { height: self.current_blockchain_height().previous(), tx_count: 0, prev_hash: self.last_hash(), tx_hash: HashTag::empty_list_hash(), state_hash: self.last_block().state_hash, error_hash: HashTag::empty_map_hash(), additional_headers: AdditionalHeaders::default(), }; block .additional_headers .insert::<ProposerId>(self.current_leader()); block .additional_headers .insert::<Epoch>(self.current_epoch()); block.additional_headers.insert::<SkipFlag>(()); block } pub fn get_proof_to_index(&self, index_name: &str) -> MapProof<String, Hash> { let snapshot = self.blockchain().snapshot(); SystemSchema::new(&snapshot) .state_aggregator() .get_proof(index_name.to_owned()) } pub fn get_configs_merkle_root(&self) -> Hash { let snapshot = self.blockchain().snapshot(); let schema = snapshot.for_core(); schema.consensus_config().object_hash() } pub fn cfg(&self) -> ConsensusConfig { let snapshot = 
self.blockchain().snapshot(); let schema = snapshot.for_core(); schema.consensus_config() } pub fn majority_count(num_validators: usize) -> usize { num_validators * 2 / 3 + 1 } pub fn first_round_timeout(&self) -> Milliseconds { self.cfg().first_round_timeout } pub fn round_timeout_increase(&self) -> Milliseconds { (self.cfg().first_round_timeout * ConsensusConfig::TIMEOUT_LINEAR_INCREASE_PERCENT) / 100 } pub fn current_round_timeout(&self) -> Milliseconds { let previous_round: u64 = self.current_round().previous().into(); self.first_round_timeout() + previous_round * self.round_timeout_increase() } pub fn transactions_hashes(&self) -> Vec<Hash> { let snapshot = self.blockchain().snapshot(); let schema = snapshot.for_core(); let idx = schema.transactions_pool(); let mut vec: Vec<Hash> = idx.iter().collect(); vec.extend(self.node_state().tx_cache().keys().copied()); vec } pub fn current_round(&self) -> Round { self.node_state().round() } pub fn block_and_precommits(&self, height: Height) -> Option<BlockProof> { let snapshot = self.blockchain().snapshot(); let schema = snapshot.for_core(); schema.block_and_precommits(height) } pub fn block_skip_and_precommits(&self) -> Option<BlockProof> { let snapshot = self.blockchain().snapshot(); let schema = snapshot.for_core(); schema.block_skip_and_precommits() } pub fn current_epoch(&self) -> Height { self.node_state().epoch() } pub fn current_blockchain_height(&self) -> Height { self.node_state().blockchain_height() } pub fn current_leader(&self) -> ValidatorId { self.node_state().leader(self.current_round()) } pub fn assert_state(&self, expected_epoch: Height, expected_round: Round) { let state = self.node_state(); let actual_epoch = state.epoch(); let actual_round = state.round(); assert_eq!(actual_epoch, expected_epoch); assert_eq!(actual_round, expected_round); } pub fn assert_pool_len(&self, expected: u64) { let snapshot = self.blockchain().snapshot(); let schema = snapshot.for_core(); assert_eq!(expected, schema.transactions_pool_len()); } pub fn assert_tx_cache_len(&self, expected: u64) { assert_eq!(expected, self.node_state().tx_cache_len() as u64); } pub fn assert_lock(&self, expected_round: Round, expected_hash: Option<Hash>) { let state = self.node_state(); let actual_round = state.locked_round(); let actual_hash = state.locked_propose(); assert_eq!(actual_round, expected_round); assert_eq!(actual_hash, expected_hash); } /// Creates new sandbox with "restarted" node. pub fn restart(self) -> Self { self.restart_with_time(UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0))<|fim▁hole|> /// Creates new sandbox with "restarted" node initialized by the given time. pub fn restart_with_time(self, time: SystemTime) -> Self { let connect = self.connect().map(|c| { Self::create_connect( &c.author(), c.payload().host.parse().expect("Expected resolved address"), time.into(), c.payload().user_agent(), self.secret_key(ValidatorId(0)), ) }); let sandbox = self.restart_uninitialized_with_time(time); if let Some(connect) = connect { sandbox.broadcast(&connect); } sandbox } /// Constructs a new uninitialized instance of a `Sandbox` preserving database and /// configuration. pub fn restart_uninitialized(self) -> Self { self.restart_uninitialized_with_time(UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0)) } /// Constructs a new uninitialized instance of a `Sandbox` preserving database and /// configuration. 
pub fn restart_uninitialized_with_time(self, time: SystemTime) -> Self { let network_channel = mpsc::channel(100); let internal_channel = mpsc::channel(100); let tx_channel = mpsc::channel(100); let api_channel = mpsc::channel(100); let address: SocketAddr = self .address(ValidatorId(0)) .parse() .expect("Failed to parse socket address"); let inner = self.inner.into_inner(); let node_sender = NodeSender { network_requests: SyncSender::new(network_channel.0.clone(), "network request"), internal_requests: SyncSender::new(internal_channel.0.clone(), "internal request"), _transactions: tx_channel.0.clone(), _api_requests: api_channel.0, }; let peers = inner .handler .state() .peers() .iter() .map(|(pk, connect)| (*pk, connect.clone())); let connect_list = ConnectList::from_peers(peers); let keys = inner.handler.state().keys().clone(); let config = Configuration { connect_list, network: NetworkConfiguration::default(), peer_discovery: Vec::new(), mempool: MemoryPoolConfig::default(), keys, }; let shared_time = SharedTime::new(Mutex::new(time)); let system_state = SandboxSystemStateProvider { listen_address: address, shared_time: shared_time.clone(), }; let blockchain = inner.handler.blockchain; let mut handler = NodeHandler::new( blockchain, &address.to_string(), node_sender, Box::new(system_state), config, inner.handler.api_state.clone(), None, Box::new(StandardPoolManager::default()), ); handler.initialize(); let inner = SandboxInner { sent: GuardedQueue::default(), _events: VecDeque::new(), timers: BinaryHeap::new(), internal_requests_rx: internal_channel.1, network_requests_rx: network_channel.1, api_requests_rx: api_channel.1, transactions_rx: tx_channel.1, handler, time: shared_time, }; let sandbox = Self { inner: RefCell::new(inner), validators_map: self.validators_map, services_map: self.services_map, api_sender: ApiSender::new(tx_channel.0), addresses: self.addresses, connect: None, }; sandbox.process_events(); sandbox } fn node_public_key(&self) -> PublicKey { self.node_state().keys().consensus_pk() } fn node_secret_key(&self) -> SecretKey { self.node_state().keys().consensus_sk().clone() } } #[derive(Debug)] pub struct SandboxBuilder { initialize: bool, _services: Vec<InstanceInitParams>, validators_count: u8, consensus_config: ConsensusConfig, rust_runtime: RustRuntimeBuilder, instances: Vec<InstanceInitParams>, artifacts: HashMap<ArtifactId, Vec<u8>>, pool_manager: Box<dyn ManagePool>, } impl Default for SandboxBuilder { fn default() -> Self { use exonum::blockchain::ConsensusConfigBuilder; let consensus_config = ConsensusConfigBuilder::new() .validator_keys(Vec::default()) .first_round_timeout(1000) .status_timeout(600_000) .peers_timeout(600_000) .txs_block_limit(1000) .max_message_len(1024 * 1024) .min_propose_timeout(PROPOSE_TIMEOUT) .max_propose_timeout(PROPOSE_TIMEOUT) .propose_timeout_threshold(u32::max_value()) .build(); Self { initialize: true, _services: Vec::new(), validators_count: 4, consensus_config, rust_runtime: RustRuntimeBuilder::new(), instances: Vec::new(), artifacts: HashMap::new(), pool_manager: Box::new(StandardPoolManager::default()), } } } impl SandboxBuilder { pub fn new() -> Self { Self::default() } pub fn do_not_initialize_connections(mut self) -> Self { self.initialize = false; self } pub fn with_consensus<F: FnOnce(&mut ConsensusConfig)>(mut self, update: F) -> Self { update(&mut self.consensus_config); self } pub fn with_validators(mut self, n: u8) -> Self { self.validators_count = n; self } /// Adds a Rust service that has default instance 
configuration to the testkit. Corresponding /// artifact and default instance are added implicitly. pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self { self.with_artifact(service.artifact_id()) .with_instance(service.default_instance()) .with_rust_service(service) } /// Customizes block proposal creation. pub fn with_pool_manager(mut self, manager: impl ManagePool + 'static) -> Self { self.pool_manager = Box::new(manager); self } /// Adds instances descriptions to the testkit that will be used for specification of builtin /// services of testing blockchain. pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self { self.instances.push(instance.into()); self } /// Adds an artifact with no deploy argument. Does nothing in case artifact with given id is /// already added. pub fn with_artifact(self, artifact: impl Into<ArtifactId>) -> Self { self.with_parametric_artifact(artifact, ()) } /// Adds an artifact with corresponding deploy argument. Does nothing in case artifact with /// given id is already added. pub fn with_parametric_artifact( mut self, artifact: impl Into<ArtifactId>, payload: impl BinaryValue, ) -> Self { let artifact = artifact.into(); self.artifacts .entry(artifact) .or_insert_with(|| payload.into_bytes()); self } /// Adds a Rust service to the sandbox. pub fn with_rust_service<S: ServiceFactory>(mut self, service: S) -> Self { self.rust_runtime = self.rust_runtime.with_factory(service); self } pub fn build(self) -> Sandbox { let mut sandbox = sandbox_with_services_uninitialized( self.rust_runtime, self.artifacts, self.instances, self.consensus_config, self.validators_count, ); sandbox.inner.borrow_mut().handler.pool_manager = self.pool_manager; sandbox.inner.borrow_mut().sent.clear(); // To clear initial connect messages. if self.initialize { let time = sandbox.time(); sandbox.initialize(time, 1, self.validators_count as usize); } sandbox } } fn gen_primitive_socket_addr(idx: u8) -> SocketAddr { let addr = Ipv4Addr::new(idx, idx, idx, idx); SocketAddr::new(IpAddr::V4(addr), u16::from(idx)) } /// Creates and initializes `GenesisConfig` with the provided information. fn create_genesis_config( consensus_config: ConsensusConfig, artifacts: HashMap<ArtifactId, Vec<u8>>, instances: Vec<InstanceInitParams>, ) -> GenesisConfig { let genesis_config_builder = instances.into_iter().fold( GenesisConfigBuilder::with_consensus_config(consensus_config), GenesisConfigBuilder::with_instance, ); artifacts .into_iter() .fold(genesis_config_builder, |builder, (artifact, payload)| { builder.with_parametric_artifact(artifact, payload) }) .build() } /// Constructs an uninitialized instance of a `Sandbox`. 
#[allow(clippy::too_many_lines)] fn sandbox_with_services_uninitialized( rust_runtime: RustRuntimeBuilder, artifacts: HashMap<ArtifactId, Vec<u8>>, instances: Vec<InstanceInitParams>, consensus: ConsensusConfig, validators_count: u8, ) -> Sandbox { let keys = (0..validators_count) .map(|i| { ( KeyPair::from_seed(&Seed::new([i; SEED_LENGTH])), KeyPair::from_seed(&Seed::new([i + validators_count; SEED_LENGTH])), ) }) .map(|(consensus, service)| Keys::from_keys(consensus, service)) .collect::<Vec<_>>(); let validators = keys .iter() .map(|keys| (keys.consensus_pk(), keys.consensus_sk().clone())) .collect::<Vec<_>>(); let service_keys = keys .iter() .map(|keys| (keys.service_pk(), keys.service_sk().clone())) .collect::<Vec<_>>(); let addresses = (1..=validators_count) .map(gen_primitive_socket_addr) .collect::<Vec<_>>(); let str_addresses: Vec<String> = addresses.iter().map(ToString::to_string).collect(); let connect_infos: Vec<_> = keys .iter() .zip(str_addresses.iter()) .map(|(keys, a)| ConnectInfo { address: a.clone(), public_key: keys.consensus_pk(), }) .collect(); let validator_keys = keys .iter() .map(|keys| ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())) .collect(); let genesis = consensus.with_validator_keys(validator_keys); let connect_list_config = ConnectListConfig::from_validator_keys(&genesis.validator_keys, &str_addresses); let tx_channel = mpsc::channel(100); let blockchain = Blockchain::new( TemporaryDB::new(), service_keys[0].clone(), ApiSender::new(tx_channel.0.clone()), ); let genesis_config = create_genesis_config(genesis, artifacts, instances); let blockchain = BlockchainBuilder::new(blockchain) .with_genesis_config(genesis_config) .with_runtime(rust_runtime.build_for_tests()) .build(); let config = Configuration { connect_list: ConnectList::from_config(connect_list_config), network: NetworkConfiguration::default(), peer_discovery: Vec::new(), mempool: MemoryPoolConfig::default(), keys: keys[0].clone(), }; let system_state = SandboxSystemStateProvider { listen_address: addresses[0], shared_time: SharedTime::new(Mutex::new( UNIX_EPOCH + Duration::new(INITIAL_TIME_IN_SECS, 0), )), }; let shared_time = Arc::clone(&system_state.shared_time); let network_channel = mpsc::channel(100); let internal_channel = mpsc::channel(100); let api_channel = mpsc::channel(100); let node_sender = NodeSender { network_requests: SyncSender::new(network_channel.0.clone(), "network request"), internal_requests: SyncSender::new(internal_channel.0.clone(), "internal request"), _transactions: tx_channel.0.clone(), _api_requests: api_channel.0, }; let api_state = SharedNodeState::new(5_000); let mut handler = NodeHandler::new( blockchain, &str_addresses[0], node_sender, Box::new(system_state), config, api_state, None, Box::new(StandardPoolManager::default()), ); handler.initialize(); let inner = SandboxInner { sent: GuardedQueue::default(), _events: VecDeque::new(), timers: BinaryHeap::new(), network_requests_rx: network_channel.1, api_requests_rx: api_channel.1, transactions_rx: tx_channel.1, internal_requests_rx: internal_channel.1, handler, time: shared_time, }; let sandbox = Sandbox { inner: RefCell::new(inner), api_sender: ApiSender::new(tx_channel.0), validators_map: HashMap::from_iter(validators), services_map: HashMap::from_iter(service_keys), addresses: connect_infos, connect: None, }; // General assumption; necessary for correct work of consensus algorithm assert!(PROPOSE_TIMEOUT < sandbox.first_round_timeout()); sandbox.process_events(); sandbox } pub fn timestamping_sandbox() 
-> Sandbox { timestamping_sandbox_builder().build() } pub fn timestamping_sandbox_builder() -> SandboxBuilder { SandboxBuilder::new() .with_default_rust_service(TimestampingService) .with_default_rust_service(SupervisorService) } #[cfg(test)] mod unit_tests { use super::{ gen_primitive_socket_addr, timestamping_sandbox, user_agent, ConnectInfo, ConsensusConfig, Duration, Height, KeyPair, Round, Sandbox, SocketAddr, ValidatorId, ValidatorKeys, }; impl Sandbox { fn add_peer_to_connect_list(&self, addr: SocketAddr, validator_keys: ValidatorKeys) { let public_key = validator_keys.consensus_key; let config = { let inner = &self.inner.borrow_mut(); let state = inner.handler.state(); let mut config = state.config().clone(); config.validator_keys.push(validator_keys); config }; self.update_config(config); self.inner .borrow_mut() .handler .state_mut() .add_peer_to_connect_list(ConnectInfo { address: addr.to_string(), public_key, }); } fn update_config(&self, config: ConsensusConfig) { self.inner .borrow_mut() .handler .state_mut() .update_config(config); } } #[test] fn test_sandbox_init() { timestamping_sandbox(); } #[test] fn test_sandbox_recv_and_send() { let sandbox = timestamping_sandbox(); // As far as all validators have connected to each other during // sandbox initialization, we need to use connect-message with unknown // keypair. let consensus = KeyPair::random(); let service = KeyPair::random(); let validator_keys = ValidatorKeys::new(consensus.public_key(), service.public_key()); let new_peer_addr = gen_primitive_socket_addr(2); // We also need to add public key from this keypair to the ConnectList. // Socket address doesn't matter in this case. sandbox.add_peer_to_connect_list(new_peer_addr, validator_keys); sandbox.recv(&Sandbox::create_connect( &consensus.public_key(), new_peer_addr.to_string(), sandbox.time().into(), &user_agent(), consensus.secret_key(), )); sandbox.send( consensus.public_key(), &Sandbox::create_connect( &sandbox.public_key(ValidatorId(0)), sandbox.address(ValidatorId(0)), sandbox.time().into(), &user_agent(), sandbox.secret_key(ValidatorId(0)), ), ); } #[test] fn test_sandbox_assert_status() { let sandbox = timestamping_sandbox(); sandbox.assert_state(Height(1), Round(1)); sandbox.add_time(Duration::from_millis(999)); sandbox.assert_state(Height(1), Round(1)); sandbox.add_time(Duration::from_millis(1)); sandbox.assert_state(Height(1), Round(2)); } #[test] #[should_panic(expected = "Expected to send the message")] fn test_sandbox_expected_to_send_but_nothing_happened() { let sandbox = timestamping_sandbox(); sandbox.send( sandbox.public_key(ValidatorId(1)), &Sandbox::create_connect( &sandbox.public_key(ValidatorId(0)), sandbox.address(ValidatorId(0)), sandbox.time().into(), &user_agent(), sandbox.secret_key(ValidatorId(0)), ), ); } #[test] #[should_panic(expected = "Expected to send message to other recipient")] fn test_sandbox_expected_to_send_another_message() { let sandbox = timestamping_sandbox(); // See comments to `test_sandbox_recv_and_send`. 
let consensus_keys = KeyPair::random(); let service_key = KeyPair::random().public_key(); let validator_keys = ValidatorKeys::new(consensus_keys.public_key(), service_key); sandbox.add_peer_to_connect_list(gen_primitive_socket_addr(1), validator_keys); sandbox.recv(&Sandbox::create_connect( &consensus_keys.public_key(), sandbox.address(ValidatorId(2)), sandbox.time().into(), &user_agent(), consensus_keys.secret_key(), )); sandbox.send( sandbox.public_key(ValidatorId(1)), &Sandbox::create_connect( &sandbox.public_key(ValidatorId(0)), sandbox.address(ValidatorId(0)), sandbox.time().into(), &user_agent(), sandbox.secret_key(ValidatorId(0)), ), ); } #[test] #[should_panic(expected = "Sent unexpected message")] fn test_sandbox_unexpected_message_when_drop() { let sandbox = timestamping_sandbox(); // See comments to `test_sandbox_recv_and_send`. let consensus_keys = KeyPair::random(); let service_key = KeyPair::random().public_key(); let validator_keys = ValidatorKeys::new(consensus_keys.public_key(), service_key); sandbox.add_peer_to_connect_list(gen_primitive_socket_addr(1), validator_keys); sandbox.recv(&Sandbox::create_connect( &consensus_keys.public_key(), sandbox.address(ValidatorId(2)), sandbox.time().into(), &user_agent(), consensus_keys.secret_key(), )); } #[test] #[should_panic(expected = "Sent unexpected message")] fn test_sandbox_unexpected_message_when_handle_another_message() { let sandbox = timestamping_sandbox(); // See comments to `test_sandbox_recv_and_send`. let consensus_keys = KeyPair::random(); let service_key = KeyPair::random().public_key(); let validator_keys = ValidatorKeys::new(consensus_keys.public_key(), service_key); sandbox.add_peer_to_connect_list(gen_primitive_socket_addr(1), validator_keys); sandbox.recv(&Sandbox::create_connect( &consensus_keys.public_key(), sandbox.address(ValidatorId(2)), sandbox.time().into(), &user_agent(), consensus_keys.secret_key(), )); sandbox.recv(&Sandbox::create_connect( &consensus_keys.public_key(), sandbox.address(ValidatorId(3)), sandbox.time().into(), &user_agent(), consensus_keys.secret_key(), )); panic!("Oops! We don't catch unexpected message"); } #[test] #[should_panic(expected = "Sent unexpected message")] fn test_sandbox_unexpected_message_when_time_changed() { let sandbox = timestamping_sandbox(); // See comments to `test_sandbox_recv_and_send`. let consensus_keys = KeyPair::random(); let service_key = KeyPair::random().public_key(); let validator_keys = ValidatorKeys::new(consensus_keys.public_key(), service_key); sandbox.add_peer_to_connect_list(gen_primitive_socket_addr(1), validator_keys); sandbox.recv(&Sandbox::create_connect( &consensus_keys.public_key(), sandbox.address(ValidatorId(2)), sandbox.time().into(), &user_agent(), consensus_keys.secret_key(), )); sandbox.add_time(Duration::from_millis(1000)); panic!("Oops! We didn't catch the unexpected message"); } }<|fim▁end|>
}
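Sandbox::add_time above is the heart of the simulated clock: advance the shared time, then pop every queued TimeoutRequest whose deadline has passed, earliest first, and feed it back into the handler. The same loop in a few lines of Python, with (deadline, event) tuples standing in for TimeoutRequest (a sketch of the pattern, not the crate's API; Python's heapq is a min-heap, so no ordering reversal is needed):

import heapq

def add_time(timers, now, duration, handle_event):
    # Advance the clock, then fire every timeout that became due.
    now += duration
    while timers and timers[0][0] <= now:
        _deadline, event = heapq.heappop(timers)
        handle_event(event)
    return now

timers = []
heapq.heappush(timers, (1000, 'round timeout'))
heapq.heappush(timers, (500, 'status timeout'))
now = add_time(timers, 0, 750, print)  # fires only 'status timeout'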
<|file_name|>ircensus_channel_bot.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 """ A simple bot to gather some census data in IRC channels. It is intended to sit in a channel and collect the data for statistics. :author: tpltnt :license: AGPLv3 """ import irc.bot import irc.strings from irc.client import ip_numstr_to_quad, ip_quad_to_numstr, ServerConnection class CensusBot(irc.bot.SingleServerIRCBot): """ The class implementing the census bot. """ def __init__(self, channel, nickname, server, port=6667): """ The constructor for the CensusBot class. :param channel: name of the channel to join :type channel: str :param nickname: nick of the bot (to use) :type nickname: str :param server: FQDN of the server to use :type server: str :param port: port to use when connecting to the server :type port: int """ if 0 != channel.find('#'): channel = '#' + channel irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname) self.channel = channel<|fim▁hole|> def on_nicknameinuse(self, connection, event): """ Change own nickname if already in use. :param connection: connection to the server :type connection: irc.client.ServerConnection :param event: event to react to :type event: irc.client.Event :raises: TypeError """ if not isinstance(connection, ServerConnection): raise TypeError("'connection' is not of type 'ServerConnection'") connection.nick(connection.get_nickname() + "_") def main(): import sys if len(sys.argv) != 4: print("Usage: " + sys.argv[0] + " <server[:port]> <channel> <nickname>") sys.exit(1) server = sys.argv[1].split(":", 1) host = server[0] if len(server) == 2: try: port = int(server[1]) except ValueError: print("Error: Erroneous port.") sys.exit(1) else: port = 6667 channel = sys.argv[2] nickname = sys.argv[3] bot = CensusBot(channel, nickname, host, port) bot.start() if __name__ == "__main__": main()<|fim▁end|>
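One library convention does the work here: irc.bot dispatches each server event to a method named on_<event type>, so a 'welcome' event calls on_welcome and a nick collision calls on_nicknameinuse. A minimal sketch in the style of the irc package's example bot, showing the join-on-welcome counterpart of the handler above (written against the library's documented conventions; the channel and server names are placeholders, not taken from this file):

import irc.bot

class MinimalBot(irc.bot.SingleServerIRCBot):
    def __init__(self, channel, nickname, server, port=6667):
        # The first argument is a list of (host, port) server tuples.
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
        self.channel = channel

    def on_welcome(self, connection, event):
        # The server accepted us; join the channel to start listening.
        connection.join(self.channel)

    def on_nicknameinuse(self, connection, event):
        # Append an underscore and retry, as the census bot above does.
        connection.nick(connection.get_nickname() + '_')

# Usage (connects to a live network, so left commented out):
# MinimalBot('#census', 'censusbot', 'irc.libera.chat').start()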
<|file_name|>a.py<|end_file_name|><|fim▁begin|>def suma(a, b): return a+b def resta(a, b):<|fim▁hole|><|fim▁end|>
return a-b
<|file_name|>TestOzoneShell.java<|end_file_name|><|fim▁begin|>/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ozone.ozShell; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Random; import java.util.UUID; import java.util.stream.Collectors; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.cli.MissingSubcommandException; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.web.ozShell.Shell; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.KeyInfo; import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.web.utils.JsonUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import com.google.common.base.Strings; import org.apache.commons.lang3.RandomStringUtils; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.ExecutionException; import picocli.CommandLine.IExceptionHandler2; import picocli.CommandLine.ParameterException; import picocli.CommandLine.ParseResult; import picocli.CommandLine.RunLast; /** * This test class specified for testing Ozone shell command. */ @RunWith(value = Parameterized.class) public class TestOzoneShell { private static final Logger LOG = LoggerFactory.getLogger(TestOzoneShell.class); /** * Set the timeout for every test. */ @Rule public Timeout testTimeout = new Timeout(300000); private static String url; private static File baseDir; private static OzoneConfiguration conf = null; private static MiniOzoneCluster cluster = null; private static ClientProtocol client = null; private static Shell shell = null; private final ByteArrayOutputStream out = new ByteArrayOutputStream(); private final ByteArrayOutputStream err = new ByteArrayOutputStream(); private static final PrintStream OLD_OUT = System.out; private static final PrintStream OLD_ERR = System.err; @Parameterized.Parameters public static Collection<Object[]> clientProtocol() { Object[][] params = new Object[][] { {RpcClient.class}, {RestClient.class}}; return Arrays.asList(params); } @Parameterized.Parameter public Class clientProtocol; /** * Create a MiniDFSCluster for testing with using distributed Ozone * handler type. * * @throws Exception */ @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); String path = GenericTestUtils.getTempPath( TestOzoneShell.class.getSimpleName()); baseDir = new File(path); baseDir.mkdirs(); shell = new Shell(); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) .build(); conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()); conf.setQuietMode(false); client = new RpcClient(conf); cluster.waitForClusterToBeReady(); } /** * shutdown MiniDFSCluster. */ @AfterClass public static void shutdown() { if (cluster != null) { cluster.shutdown(); } if (baseDir != null) { FileUtil.fullyDelete(baseDir, true); } } @Before public void setup() { System.setOut(new PrintStream(out)); System.setErr(new PrintStream(err)); if(clientProtocol.equals(RestClient.class)) { String hostName = cluster.getOzoneManager().getHttpServer() .getHttpAddress().getHostName(); int port = cluster .getOzoneManager().getHttpServer().getHttpAddress().getPort(); url = String.format("http://" + hostName + ":" + port); } else { List<ServiceInfo> services = null; try { services = cluster.getOzoneManager().getServiceList(); } catch (IOException e) { LOG.error("Could not get service list from OM"); } String hostName = services.stream().filter( a -> a.getNodeType().equals(HddsProtos.NodeType.OM)) .collect(Collectors.toList()).get(0).getHostname(); String port = cluster.getOzoneManager().getRpcPort(); url = String.format("o3://" + hostName + ":" + port); } } @After public void reset() { // reset stream after each unit test out.reset(); err.reset(); // restore system streams System.setOut(OLD_OUT); System.setErr(OLD_ERR); } @Test public void testCreateVolume() throws Exception { LOG.info("Running testCreateVolume"); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); testCreateVolume(volumeName, ""); volumeName = "volume" + RandomStringUtils.randomNumeric(5); testCreateVolume("/////" + volumeName, ""); testCreateVolume("/////", "Volume name is required"); testCreateVolume("/////vol/123", "Invalid volume name. 
Delimiters (/) not allowed in volume name"); } private void testCreateVolume(String volumeName, String errorMsg) throws Exception { err.reset(); String userName = "bilbo"; String[] args = new String[] {"volume", "create", url + "/" + volumeName, "--user", userName, "--root"}; if (Strings.isNullOrEmpty(errorMsg)) { execute(shell, args); } else { executeWithError(shell, args, errorMsg); return; } String truncatedVolumeName = volumeName.substring(volumeName.lastIndexOf('/') + 1); OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName); assertEquals(truncatedVolumeName, volumeInfo.getName()); assertEquals(userName, volumeInfo.getOwner()); } private void execute(Shell ozoneShell, String[] args) { List<String> arguments = new ArrayList(Arrays.asList(args)); LOG.info("Executing shell command with args {}", arguments); CommandLine cmd = ozoneShell.getCmd(); IExceptionHandler2<List<Object>> exceptionHandler = new IExceptionHandler2<List<Object>>() { @Override public List<Object> handleParseException(ParameterException ex, String[] args) { throw ex; } @Override public List<Object> handleExecutionException(ExecutionException ex, ParseResult parseResult) { throw ex; } }; cmd.parseWithHandlers(new RunLast(), exceptionHandler, args); } /** * Test to create volume without specifying --user or -u. * @throws Exception */ @Test public void testCreateVolumeWithoutUser() throws Exception { String volumeName = "volume" + RandomStringUtils.randomNumeric(1); String[] args = new String[] {"volume", "create", url + "/" + volumeName, "--root"}; execute(shell, args); String truncatedVolumeName = volumeName.substring(volumeName.lastIndexOf('/') + 1); OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName); assertEquals(truncatedVolumeName, volumeInfo.getName()); assertEquals(UserGroupInformation.getCurrentUser().getUserName(), volumeInfo.getOwner()); } @Test public void testDeleteVolume() throws Exception { LOG.info("Running testDeleteVolume"); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setOwner("bilbo") .setQuota("100TB") .build(); client.createVolume(volumeName, volumeArgs); OzoneVolume volume = client.getVolumeDetails(volumeName); assertNotNull(volume); String[] args = new String[] {"volume", "delete", url + "/" + volumeName}; execute(shell, args); String output = out.toString(); assertTrue(output.contains("Volume " + volumeName + " is deleted")); // verify if volume has been deleted try { client.getVolumeDetails(volumeName); fail("Get volume call should have thrown."); } catch (IOException e) { GenericTestUtils.assertExceptionContains( "Info Volume failed, error:VOLUME_NOT_FOUND", e); } volumeName = "volume" + RandomStringUtils.randomNumeric(5); volumeArgs = VolumeArgs.newBuilder() .setOwner("bilbo") .setQuota("100TB") .build(); client.createVolume(volumeName, volumeArgs); volume = client.getVolumeDetails(volumeName); assertNotNull(volume); //volumeName prefixed with / String volumeNameWithSlashPrefix = "/" + volumeName; args = new String[] {"volume", "delete", url + "/" + volumeNameWithSlashPrefix}; execute(shell, args); output = out.toString(); assertTrue(output.contains("Volume " + volumeName + " is deleted")); // verify if volume has been deleted try { client.getVolumeDetails(volumeName); fail("Get volume call should have thrown."); } catch (IOException e) { GenericTestUtils.assertExceptionContains( "Info Volume failed, error:VOLUME_NOT_FOUND", e); } } @Test public void testInfoVolume() throws Exception { 
LOG.info("Running testInfoVolume"); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setOwner("bilbo") .setQuota("100TB") .build(); client.createVolume(volumeName, volumeArgs); //volumeName supplied as-is String[] args = new String[] {"volume", "info", url + "/" + volumeName}; execute(shell, args); String output = out.toString(); assertTrue(output.contains(volumeName)); assertTrue(output.contains("createdOn") && output.contains(OzoneConsts.OZONE_TIME_ZONE)); //volumeName prefixed with / String volumeNameWithSlashPrefix = "/" + volumeName; args = new String[] {"volume", "info", url + "/" + volumeNameWithSlashPrefix}; execute(shell, args); output = out.toString(); assertTrue(output.contains(volumeName)); assertTrue(output.contains("createdOn") && output.contains(OzoneConsts.OZONE_TIME_ZONE)); // test infoVolume with invalid volume name args = new String[] {"volume", "info", url + "/" + volumeName + "/invalid-name"}; executeWithError(shell, args, "Invalid volume name. " + "Delimiters (/) not allowed in volume name"); // get info for non-exist volume args = new String[] {"volume", "info", url + "/invalid-volume"}; executeWithError(shell, args, "VOLUME_NOT_FOUND"); } @Test public void testShellIncompleteCommand() throws Exception { LOG.info("Running testShellIncompleteCommand"); String expectedError = "Incomplete command"; String[] args = new String[] {}; //executing 'ozone sh' executeWithError(shell, args, expectedError, "Usage: ozone sh [-hV] [--verbose] [-D=<String=String>]..." + " [COMMAND]"); args = new String[] {"volume"}; //executing 'ozone sh volume' executeWithError(shell, args, expectedError, "Usage: ozone sh volume [-hV] [COMMAND]"); args = new String[] {"bucket"}; //executing 'ozone sh bucket' executeWithError(shell, args, expectedError, "Usage: ozone sh bucket [-hV] [COMMAND]"); args = new String[] {"key"}; //executing 'ozone sh key' executeWithError(shell, args, expectedError, "Usage: ozone sh key [-hV] [COMMAND]"); } @Test public void testUpdateVolume() throws Exception { LOG.info("Running testUpdateVolume"); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String userName = "bilbo"; VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setOwner("bilbo") .setQuota("100TB") .build(); client.createVolume(volumeName, volumeArgs); OzoneVolume vol = client.getVolumeDetails(volumeName); assertEquals(userName, vol.getOwner()); assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), vol.getQuota()); String[] args = new String[] {"volume", "update", url + "/" + volumeName, "--quota", "500MB"}; execute(shell, args); vol = client.getVolumeDetails(volumeName); assertEquals(userName, vol.getOwner()); assertEquals(OzoneQuota.parseQuota("500MB").sizeInBytes(), vol.getQuota()); String newUser = "new-user"; args = new String[] {"volume", "update", url + "/" + volumeName, "--user", newUser}; execute(shell, args); vol = client.getVolumeDetails(volumeName); assertEquals(newUser, vol.getOwner()); //volume with / prefix String volumeWithPrefix = "/" + volumeName; String newUser2 = "new-user2"; args = new String[] {"volume", "update", url + "/" + volumeWithPrefix, "--user", newUser2}; execute(shell, args); vol = client.getVolumeDetails(volumeName); assertEquals(newUser2, vol.getOwner()); // test error conditions args = new String[] {"volume", "update", url + "/invalid-volume", "--user", newUser}; executeWithError(shell, args, "Info Volume failed, error:VOLUME_NOT_FOUND"); err.reset(); args = new String[] {"volume", 
"update", url + "/invalid-volume", "--quota", "500MB"}; executeWithError(shell, args, "Info Volume failed, error:VOLUME_NOT_FOUND"); } /** * Execute command, assert exeception message and returns true if error * was thrown. */ private void executeWithError(Shell ozoneShell, String[] args, String expectedError) { if (Strings.isNullOrEmpty(expectedError)) { execute(ozoneShell, args); } else { try { execute(ozoneShell, args); fail("Exception is expected from command execution " + Arrays .asList(args)); } catch (Exception ex) { if (!Strings.isNullOrEmpty(expectedError)) { Throwable exceptionToCheck = ex; if (exceptionToCheck.getCause() != null) { exceptionToCheck = exceptionToCheck.getCause(); } Assert.assertTrue( String.format( "Error of shell code doesn't contain the " + "exception [%s] in [%s]", expectedError, exceptionToCheck.getMessage()), exceptionToCheck.getMessage().contains(expectedError)); } } } } /** * Execute command, assert exception message and returns true if error * was thrown and contains the specified usage string. */ private void executeWithError(Shell ozoneShell, String[] args, String expectedError, String usage) { if (Strings.isNullOrEmpty(expectedError)) { execute(ozoneShell, args); } else { try { execute(ozoneShell, args); fail("Exception is expected from command execution " + Arrays .asList(args)); } catch (Exception ex) { if (!Strings.isNullOrEmpty(expectedError)) { Throwable exceptionToCheck = ex; if (exceptionToCheck.getCause() != null) { exceptionToCheck = exceptionToCheck.getCause(); } Assert.assertTrue( String.format( "Error of shell code doesn't contain the " + "exception [%s] in [%s]", expectedError, exceptionToCheck.getMessage()), exceptionToCheck.getMessage().contains(expectedError)); Assert.assertTrue( exceptionToCheck instanceof MissingSubcommandException); Assert.assertTrue( ((MissingSubcommandException)exceptionToCheck) .getUsage().contains(usage)); } } } } @Test public void testListVolume() throws Exception { LOG.info("Running testListVolume"); String protocol = clientProtocol.getName().toLowerCase(); String commandOutput, commandError; List<VolumeInfo> volumes; final int volCount = 20; final String user1 = "test-user-a-" + protocol; final String user2 = "test-user-b-" + protocol; // Create 20 volumes, 10 for user1 and another 10 for user2. 
for (int x = 0; x < volCount; x++) { String volumeName; String userName; if (x % 2 == 0) { // create volume [test-vol0, test-vol2, ..., test-vol18] for user1 userName = user1; volumeName = "test-vol-" + protocol + x; } else { // create volume [test-vol1, test-vol3, ..., test-vol19] for user2 userName = user2; volumeName = "test-vol-" + protocol + x; } VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setOwner(userName) .setQuota("100TB") .build(); client.createVolume(volumeName, volumeArgs); OzoneVolume vol = client.getVolumeDetails(volumeName); assertNotNull(vol); } String[] args = new String[] {"volume", "list", url + "/abcde", "--user", user1, "--length", "100"}; executeWithError(shell, args, "Invalid URI"); err.reset(); // test -length option args = new String[] {"volume", "list", url + "/", "--user", user1, "--length", "100"}; execute(shell, args); commandOutput = out.toString(); volumes = (List<VolumeInfo>) JsonUtils .toJsonList(commandOutput, VolumeInfo.class); assertEquals(10, volumes.size()); for (VolumeInfo volume : volumes) { assertEquals(volume.getOwner().getName(), user1); assertTrue(volume.getCreatedOn().contains(OzoneConsts.OZONE_TIME_ZONE)); } out.reset(); args = new String[] {"volume", "list", url + "/", "--user", user1, "--length", "2"}; execute(shell, args); commandOutput = out.toString(); volumes = (List<VolumeInfo>) JsonUtils .toJsonList(commandOutput, VolumeInfo.class); assertEquals(2, volumes.size()); // test --prefix option out.reset(); args = new String[] {"volume", "list", url + "/", "--user", user1, "--length", "100", "--prefix", "test-vol-" + protocol + "1"}; execute(shell, args); commandOutput = out.toString(); volumes = (List<VolumeInfo>) JsonUtils .toJsonList(commandOutput, VolumeInfo.class); assertEquals(5, volumes.size()); // return volume names should be [test-vol10, test-vol12, ..., test-vol18] for (int i = 0; i < volumes.size(); i++) { assertEquals(volumes.get(i).getVolumeName(), "test-vol-" + protocol + ((i + 5) * 2)); assertEquals(volumes.get(i).getOwner().getName(), user1); } // test -start option out.reset(); args = new String[] {"volume", "list", url + "/", "--user", user2, "--length", "100", "--start", "test-vol-" + protocol + "15"}; execute(shell, args); commandOutput = out.toString(); volumes = (List<VolumeInfo>) JsonUtils .toJsonList(commandOutput, VolumeInfo.class); assertEquals(2, volumes.size()); assertEquals(volumes.get(0).getVolumeName(), "test-vol-" + protocol + "17"); assertEquals(volumes.get(1).getVolumeName(), "test-vol-" + protocol + "19"); assertEquals(volumes.get(0).getOwner().getName(), user2); assertEquals(volumes.get(1).getOwner().getName(), user2); // test error conditions err.reset(); args = new String[] {"volume", "list", url + "/", "--user", user2, "--length", "-1"}; executeWithError(shell, args, "the length should be a positive number"); err.reset(); args = new String[] {"volume", "list", url + "/", "--user", user2, "--length", "invalid-length"}; executeWithError(shell, args, "For input string: \"invalid-length\""); } @Test public void testCreateBucket() throws Exception { LOG.info("Running testCreateBucket"); OzoneVolume vol = creatVolume(); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String[] args = new String[] {"bucket", "create", url + "/" + vol.getName() + "/" + bucketName}; execute(shell, args); OzoneBucket bucketInfo = vol.getBucket(bucketName); assertEquals(vol.getName(), bucketInfo.getVolumeName()); assertEquals(bucketName, bucketInfo.getName()); // test create a bucket in a non-exist volume 
args = new String[] {"bucket", "create", url + "/invalid-volume/" + bucketName}; executeWithError(shell, args, "Info Volume failed, error:VOLUME_NOT_FOUND"); // test createBucket with invalid bucket name args = new String[] {"bucket", "create", url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"}; executeWithError(shell, args, "Invalid bucket name. Delimiters (/) not allowed in bucket name"); } @Test public void testDeleteBucket() throws Exception { LOG.info("Running testDeleteBucket"); OzoneVolume vol = creatVolume(); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); vol.createBucket(bucketName); OzoneBucket bucketInfo = vol.getBucket(bucketName); assertNotNull(bucketInfo); String[] args = new String[] {"bucket", "delete", url + "/" + vol.getName() + "/" + bucketName}; execute(shell, args); // verify if bucket has been deleted in volume try { vol.getBucket(bucketName); fail("Get bucket should have thrown."); } catch (IOException e) { GenericTestUtils.assertExceptionContains( "Info Bucket failed, error: BUCKET_NOT_FOUND", e); } // test delete bucket in a non-exist volume args = new String[] {"bucket", "delete", url + "/invalid-volume" + "/" + bucketName}; executeWithError(shell, args, "Info Volume failed, error:VOLUME_NOT_FOUND"); err.reset(); // test delete non-exist bucket args = new String[] {"bucket", "delete", url + "/" + vol.getName() + "/invalid-bucket"}; executeWithError(shell, args, "Delete Bucket failed, error:BUCKET_NOT_FOUND"); } @Test public void testInfoBucket() throws Exception { LOG.info("Running testInfoBucket"); OzoneVolume vol = creatVolume(); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); vol.createBucket(bucketName); String[] args = new String[] {"bucket", "info", url + "/" + vol.getName() + "/" + bucketName}; execute(shell, args); String output = out.toString(); assertTrue(output.contains(bucketName)); assertTrue(output.contains("createdOn") && output.contains(OzoneConsts.OZONE_TIME_ZONE)); // test infoBucket with invalid bucket name args = new String[] {"bucket", "info", url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"}; executeWithError(shell, args, "Invalid bucket name. 
Delimiters (/) not allowed in bucket name"); // test get info from a non-exist bucket args = new String[] {"bucket", "info", url + "/" + vol.getName() + "/invalid-bucket" + bucketName}; executeWithError(shell, args, "Info Bucket failed, error: BUCKET_NOT_FOUND"); } @Test public void testUpdateBucket() throws Exception { LOG.info("Running testUpdateBucket"); OzoneVolume vol = creatVolume(); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); vol.createBucket(bucketName); OzoneBucket bucket = vol.getBucket(bucketName); int aclSize = bucket.getAcls().size(); String[] args = new String[] {"bucket", "update", url + "/" + vol.getName() + "/" + bucketName, "--addAcl", "user:frodo:rw,group:samwise:r"}; execute(shell, args); String output = out.toString(); assertTrue(output.contains("createdOn") && output.contains(OzoneConsts.OZONE_TIME_ZONE)); bucket = vol.getBucket(bucketName); assertEquals(2 + aclSize, bucket.getAcls().size()); OzoneAcl acl = bucket.getAcls().get(aclSize); assertTrue(acl.getName().equals("frodo") && acl.getType() == OzoneACLType.USER && acl.getRights()== OzoneACLRights.READ_WRITE); args = new String[] {"bucket", "update", url + "/" + vol.getName() + "/" + bucketName, "--removeAcl", "user:frodo:rw"}; execute(shell, args); bucket = vol.getBucket(bucketName); acl = bucket.getAcls().get(aclSize); assertEquals(1 + aclSize, bucket.getAcls().size()); assertTrue(acl.getName().equals("samwise") && acl.getType() == OzoneACLType.GROUP && acl.getRights()== OzoneACLRights.READ); // test update bucket for a non-exist bucket args = new String[] {"bucket", "update", url + "/" + vol.getName() + "/invalid-bucket", "--addAcl", "user:frodo:rw"}; executeWithError(shell, args, "Info Bucket failed, error: BUCKET_NOT_FOUND"); } @Test public void testListBucket() throws Exception { LOG.info("Running testListBucket"); List<BucketInfo> buckets; String commandOutput; int bucketCount = 11; OzoneVolume vol = creatVolume(); List<String> bucketNames = new ArrayList<>(); // create bucket from test-bucket0 to test-bucket10 for (int i = 0; i < bucketCount; i++) { String name = "test-bucket" + i; bucketNames.add(name); vol.createBucket(name); OzoneBucket bucket = vol.getBucket(name); assertNotNull(bucket); } // test listBucket with invalid volume name String[] args = new String[] {"bucket", "list", url + "/" + vol.getName() + "/invalid-name"}; executeWithError(shell, args, "Invalid volume name. 
" + "Delimiters (/) not allowed in volume name"); // test -length option args = new String[] {"bucket", "list", url + "/" + vol.getName(), "--length", "100"}; execute(shell, args); commandOutput = out.toString(); buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput, BucketInfo.class); assertEquals(11, buckets.size()); // sort bucket names since the return buckets isn't in created order Collections.sort(bucketNames); // return bucket names should be [test-bucket0, test-bucket1, // test-bucket10, test-bucket2, ,..., test-bucket9] for (int i = 0; i < buckets.size(); i++) { assertEquals(buckets.get(i).getBucketName(), bucketNames.get(i)); assertEquals(buckets.get(i).getVolumeName(), vol.getName()); assertTrue(buckets.get(i).getCreatedOn() .contains(OzoneConsts.OZONE_TIME_ZONE)); } out.reset(); args = new String[] {"bucket", "list", url + "/" + vol.getName(), "--length", "3"}; execute(shell, args); commandOutput = out.toString(); buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput, BucketInfo.class); assertEquals(3, buckets.size()); // return bucket names should be [test-bucket0, // test-bucket1, test-bucket10] assertEquals(buckets.get(0).getBucketName(), "test-bucket0"); assertEquals(buckets.get(1).getBucketName(), "test-bucket1"); assertEquals(buckets.get(2).getBucketName(), "test-bucket10"); // test --prefix option out.reset(); args = new String[] {"bucket", "list", url + "/" + vol.getName(), "--length", "100", "--prefix", "test-bucket1"}; execute(shell, args); commandOutput = out.toString(); buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput, BucketInfo.class); assertEquals(2, buckets.size()); // return bucket names should be [test-bucket1, test-bucket10] assertEquals(buckets.get(0).getBucketName(), "test-bucket1"); assertEquals(buckets.get(1).getBucketName(), "test-bucket10"); // test -start option out.reset(); args = new String[] {"bucket", "list", url + "/" + vol.getName(), "--length", "100", "--start", "test-bucket7"}; execute(shell, args); commandOutput = out.toString(); buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput, BucketInfo.class); assertEquals(2, buckets.size()); assertEquals(buckets.get(0).getBucketName(), "test-bucket8"); assertEquals(buckets.get(1).getBucketName(), "test-bucket9"); // test error conditions err.reset(); args = new String[] {"bucket", "list", url + "/" + vol.getName(), "--length", "-1"}; executeWithError(shell, args, "the length should be a positive number"); } @Test public void testPutKey() throws Exception { LOG.info("Running testPutKey"); OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String keyName = "key" + RandomStringUtils.randomNumeric(5); String[] args = new String[] {"key", "put", url + "/" + volumeName + "/" + bucketName + "/" + keyName, createTmpFile()};<|fim▁hole|> OzoneKey keyInfo = bucket.getKey(keyName); assertEquals(keyName, keyInfo.getName()); // test put key in a non-exist bucket args = new String[] {"key", "put", url + "/" + volumeName + "/invalid-bucket/" + keyName, createTmpFile()}; executeWithError(shell, args, "Info Bucket failed, error: BUCKET_NOT_FOUND"); } @Test public void testGetKey() throws Exception { LOG.info("Running testGetKey"); String keyName = "key" + RandomStringUtils.randomNumeric(5); OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String dataStr = "test-data"; OzoneOutputStream keyOutputStream = bucket.createKey(keyName, 
dataStr.length()); keyOutputStream.write(dataStr.getBytes()); keyOutputStream.close(); String tmpPath = baseDir.getAbsolutePath() + "/testfile-" + UUID.randomUUID().toString(); String[] args = new String[] {"key", "get", url + "/" + volumeName + "/" + bucketName + "/" + keyName, tmpPath}; execute(shell, args); byte[] dataBytes = new byte[dataStr.length()]; try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) { randFile.read(dataBytes); } assertEquals(dataStr, DFSUtil.bytes2String(dataBytes)); tmpPath = baseDir.getAbsolutePath() + File.separatorChar + keyName; args = new String[] {"key", "get", url + "/" + volumeName + "/" + bucketName + "/" + keyName, baseDir.getAbsolutePath()}; execute(shell, args); dataBytes = new byte[dataStr.length()]; try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) { randFile.read(dataBytes); } assertEquals(dataStr, DFSUtil.bytes2String(dataBytes)); } @Test public void testDeleteKey() throws Exception { LOG.info("Running testDeleteKey"); String keyName = "key" + RandomStringUtils.randomNumeric(5); OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String dataStr = "test-data"; OzoneOutputStream keyOutputStream = bucket.createKey(keyName, dataStr.length()); keyOutputStream.write(dataStr.getBytes()); keyOutputStream.close(); OzoneKey keyInfo = bucket.getKey(keyName); assertEquals(keyName, keyInfo.getName()); String[] args = new String[] {"key", "delete", url + "/" + volumeName + "/" + bucketName + "/" + keyName}; execute(shell, args); // verify if key has been deleted in the bucket try { bucket.getKey(keyName); fail("Get key should have thrown."); } catch (IOException e) { GenericTestUtils.assertExceptionContains( "Lookup key failed, error:KEY_NOT_FOUND", e); } // test delete key in a non-exist bucket args = new String[] {"key", "delete", url + "/" + volumeName + "/invalid-bucket/" + keyName}; executeWithError(shell, args, "Info Bucket failed, error: BUCKET_NOT_FOUND"); err.reset(); // test delete a non-exist key in bucket args = new String[] {"key", "delete", url + "/" + volumeName + "/" + bucketName + "/invalid-key"}; executeWithError(shell, args, "Delete key failed, error:KEY_NOT_FOUND"); } @Test public void testInfoKeyDetails() throws Exception { LOG.info("Running testInfoKey"); String keyName = "key" + RandomStringUtils.randomNumeric(5); OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String dataStr = "test-data"; OzoneOutputStream keyOutputStream = bucket.createKey(keyName, dataStr.length()); keyOutputStream.write(dataStr.getBytes()); keyOutputStream.close(); String[] args = new String[] {"key", "info", url + "/" + volumeName + "/" + bucketName + "/" + keyName}; // verify the response output execute(shell, args); String output = out.toString(); assertTrue(output.contains(keyName)); assertTrue( output.contains("createdOn") && output.contains("modifiedOn") && output .contains(OzoneConsts.OZONE_TIME_ZONE)); assertTrue( output.contains("containerID") && output.contains("localID") && output .contains("length") && output.contains("offset")); // reset stream out.reset(); err.reset(); // get the info of a non-exist key args = new String[] {"key", "info", url + "/" + volumeName + "/" + bucketName + "/invalid-key"}; // verify the response output // get the non-exist key info should be failed executeWithError(shell, args, "Lookup key failed, error:KEY_NOT_FOUND"); } @Test public void 
testInfoDirKey() throws Exception { LOG.info("Running testInfoKey for Dir Key"); String dirKeyName = "test/"; String keyNameOnly = "test"; OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String dataStr = "test-data"; OzoneOutputStream keyOutputStream = bucket.createKey(dirKeyName, dataStr.length()); keyOutputStream.write(dataStr.getBytes()); keyOutputStream.close(); String[] args = new String[] {"key", "info", url + "/" + volumeName + "/" + bucketName + "/" + dirKeyName}; // verify the response output execute(shell, args); String output = out.toString(); assertTrue(output.contains(dirKeyName)); assertTrue(output.contains("createdOn") && output.contains("modifiedOn") && output.contains(OzoneConsts.OZONE_TIME_ZONE)); args = new String[] {"key", "info", url + "/" + volumeName + "/" + bucketName + "/" + keyNameOnly}; executeWithError(shell, args, "Lookup key failed, error:KEY_NOT_FOUND"); out.reset(); err.reset(); } @Test public void testListKey() throws Exception { LOG.info("Running testListKey"); String commandOutput; List<KeyInfo> keys; int keyCount = 11; OzoneBucket bucket = creatBucket(); String volumeName = bucket.getVolumeName(); String bucketName = bucket.getName(); String keyName; List<String> keyNames = new ArrayList<>(); for (int i = 0; i < keyCount; i++) { keyName = "test-key" + i; keyNames.add(keyName); String dataStr = "test-data"; OzoneOutputStream keyOutputStream = bucket.createKey(keyName, dataStr.length()); keyOutputStream.write(dataStr.getBytes()); keyOutputStream.close(); } // test listKey with invalid bucket name String[] args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName + "/invalid-name"}; executeWithError(shell, args, "Invalid bucket name. 
" + "Delimiters (/) not allowed in bucket name"); // test -length option args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, "--length", "100"}; execute(shell, args); commandOutput = out.toString(); keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput, KeyInfo.class); assertEquals(11, keys.size()); // sort key names since the return keys isn't in created order Collections.sort(keyNames); // return key names should be [test-key0, test-key1, // test-key10, test-key2, ,..., test-key9] for (int i = 0; i < keys.size(); i++) { assertEquals(keys.get(i).getKeyName(), keyNames.get(i)); // verify the creation/modification time of key assertTrue(keys.get(i).getCreatedOn() .contains(OzoneConsts.OZONE_TIME_ZONE)); assertTrue(keys.get(i).getModifiedOn() .contains(OzoneConsts.OZONE_TIME_ZONE)); } out.reset(); args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, "--length", "3"}; execute(shell, args); commandOutput = out.toString(); keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput, KeyInfo.class); assertEquals(3, keys.size()); // return key names should be [test-key0, test-key1, test-key10] assertEquals(keys.get(0).getKeyName(), "test-key0"); assertEquals(keys.get(1).getKeyName(), "test-key1"); assertEquals(keys.get(2).getKeyName(), "test-key10"); // test --prefix option out.reset(); args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, "--length", "100", "--prefix", "test-key1"}; execute(shell, args); commandOutput = out.toString(); keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput, KeyInfo.class); assertEquals(2, keys.size()); // return key names should be [test-key1, test-key10] assertEquals(keys.get(0).getKeyName(), "test-key1"); assertEquals(keys.get(1).getKeyName(), "test-key10"); // test -start option out.reset(); args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, "--length", "100", "--start", "test-key7"}; execute(shell, args); commandOutput = out.toString(); keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput, KeyInfo.class); assertEquals(keys.get(0).getKeyName(), "test-key8"); assertEquals(keys.get(1).getKeyName(), "test-key9"); // test error conditions err.reset(); args = new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName, "--length", "-1"}; executeWithError(shell, args, "the length should be a positive number"); } @Test public void testS3BucketMapping() throws IOException { List<ServiceInfo> services = cluster.getOzoneManager().getServiceList(); String omHostName = services.stream().filter( a -> a.getNodeType().equals(HddsProtos.NodeType.OM)) .collect(Collectors.toList()).get(0).getHostname(); String omPort = cluster.getOzoneManager().getRpcPort(); String setOmAddress = "--set=" + OZONE_OM_ADDRESS_KEY + "=" + omHostName + ":" + omPort; String s3Bucket = "bucket1"; String commandOutput; createS3Bucket("ozone", s3Bucket); //WHEN String[] args = new String[] {setOmAddress, "bucket", "path", s3Bucket}; execute(shell, args); //THEN commandOutput = out.toString(); String volumeName = client.getOzoneVolumeName(s3Bucket); assertTrue(commandOutput.contains("Volume name for S3Bucket is : " + volumeName)); assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" + s3Bucket + "." 
+ volumeName)); out.reset(); //Trying to get map for an unknown bucket args = new String[] {setOmAddress, "bucket", "path", "unknownbucket"}; executeWithError(shell, args, "S3_BUCKET_NOT_FOUND"); // No bucket name args = new String[] {setOmAddress, "bucket", "path"}; executeWithError(shell, args, "Missing required parameter"); // Invalid bucket name args = new String[] {setOmAddress, "bucket", "path", "/asd/multipleslash"}; executeWithError(shell, args, "S3_BUCKET_NOT_FOUND"); } private void createS3Bucket(String userName, String s3Bucket) { try { client.createS3Bucket("ozone", s3Bucket); } catch (IOException ex) { GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex); } } private OzoneVolume creatVolume() throws OzoneException, IOException { String volumeName = RandomStringUtils.randomNumeric(5) + "volume"; VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setOwner("bilbo") .setQuota("100TB") .build(); try { client.createVolume(volumeName, volumeArgs); } catch (Exception ex) { Assert.assertEquals("PartialGroupNameException", ex.getCause().getClass().getSimpleName()); } OzoneVolume volume = client.getVolumeDetails(volumeName); return volume; } private OzoneBucket creatBucket() throws OzoneException, IOException { OzoneVolume vol = creatVolume(); String bucketName = RandomStringUtils.randomNumeric(5) + "bucket"; vol.createBucket(bucketName); OzoneBucket bucketInfo = vol.getBucket(bucketName); return bucketInfo; } /** * Create a temporary file used for putting key. * @return the created file's path string * @throws Exception */ private String createTmpFile() throws Exception { // write a new file that used for putting key File tmpFile = new File(baseDir, "/testfile-" + UUID.randomUUID().toString()); FileOutputStream randFile = new FileOutputStream(tmpFile); Random r = new Random(); for (int x = 0; x < 10; x++) { char c = (char) (r.nextInt(26) + 'a'); randFile.write(c); } randFile.close(); return tmpFile.getAbsolutePath(); } }<|fim▁end|>
execute(shell, args);
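The test above wires picocli's `parseWithHandlers` to a rethrowing `IExceptionHandler2` so failures surface to JUnit instead of being printed and swallowed. A minimal, self-contained sketch of that pattern (the `EchoCommand` class is hypothetical; the `RunLast`/`IExceptionHandler2` APIs are the picocli 3.x ones the test imports):

import java.util.List;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.IExceptionHandler2;
import picocli.CommandLine.ParameterException;
import picocli.CommandLine.Parameters;
import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;

@Command(name = "echo")
class EchoCommand implements Runnable {
    @Parameters(index = "0", description = "Text to print.")
    private String text;

    @Override
    public void run() {
        System.out.println(text);
    }

    public static void main(String[] args) {
        // Rethrow instead of printing, so the caller (e.g. a JUnit test)
        // can assert on the message, exactly as executeWithError does above.
        IExceptionHandler2<List<Object>> rethrow = new IExceptionHandler2<List<Object>>() {
            @Override
            public List<Object> handleParseException(ParameterException ex, String[] a) {
                throw ex;
            }

            @Override
            public List<Object> handleExecutionException(ExecutionException ex, ParseResult pr) {
                throw ex;
            }
        };
        new CommandLine(new EchoCommand()).parseWithHandlers(new RunLast(), rethrow, args);
    }
}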
<|file_name|>Fence.cpp<|end_file_name|><|fim▁begin|>#include <gbVk/Fence.hpp> #include <gbVk/Exceptions.hpp> #include <gbBase/Assert.hpp> namespace GHULBUS_VULKAN_NAMESPACE { Fence::Fence(VkDevice logical_device, VkFence fence) :m_fence(fence), m_device(logical_device) { } Fence::~Fence() { if(m_fence) { vkDestroyFence(m_device, m_fence, nullptr); } } Fence::Fence(Fence&& rhs) :m_fence(rhs.m_fence), m_device(rhs.m_device) { rhs.m_fence = nullptr; rhs.m_device = nullptr; } VkFence Fence::getVkFence() { return m_fence; } Fence::Status Fence::getStatus() { VkResult res = vkGetFenceStatus(m_device, m_fence); if(res == VK_NOT_READY) { return Status::NotReady; } checkVulkanError(res, "Error in vkGetFenceStatus."); return Status::Ready; } void Fence::wait() { auto const status = wait_for(std::chrono::nanoseconds::max()); GHULBUS_ASSERT(status == Status::Ready); } Fence::Status Fence::wait_for(std::chrono::nanoseconds timeout) { VkResult res = vkWaitForFences(m_device, 1, &m_fence, VK_TRUE, timeout.count()); if(res == VK_NOT_READY) { return Status::NotReady; } checkVulkanError(res, "Error in vkWaitForFences."); return Status::Ready; }<|fim▁hole|> void Fence::reset() { VkResult res = vkResetFences(m_device, 1, &m_fence); checkVulkanError(res, "Error in vkResetFences."); } }<|fim▁end|>
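A minimal usage sketch for the Fence wrapper above (the `waitForSubmit` helper is hypothetical and error checking for the raw Vulkan calls is omitted; only the wrapper's own Status/wait_for/reset API is assumed):

#include <gbVk/Fence.hpp>
#include <chrono>

// Submit work and block until the GPU signals completion. The wrapper takes
// ownership of the VkFence and destroys it in its destructor.
void waitForSubmit(VkDevice device, VkQueue queue, VkSubmitInfo const& submit_info)
{
    VkFenceCreateInfo fence_info = {};
    fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    VkFence raw_fence;
    vkCreateFence(device, &fence_info, nullptr, &raw_fence);    // error check omitted

    GHULBUS_VULKAN_NAMESPACE::Fence fence(device, raw_fence);   // wrapper takes ownership
    vkQueueSubmit(queue, 1, &submit_info, fence.getVkFence());  // error check omitted

    using namespace std::chrono_literals;
    while (fence.wait_for(16ms) == GHULBUS_VULKAN_NAMESPACE::Fence::Status::NotReady) {
        // a real caller could do useful CPU work here instead of spinning
    }
    fence.reset();  // unsignal the fence so it can guard the next submit
}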
<|file_name|>tfi_model.py<|end_file_name|><|fim▁begin|>"""A few convenience functions to set up the Ising model in a TF. TFIM stands for Ising model in a transverse field, i.e.: .. math:: H=\sum_{i}\left[S^{z}_{i}S^{z}_{i+1} + h S^{x}_{i}\right] """ class TranverseFieldIsingModel(object): """Implements a few convenience functions for the TFIM. Does exactly that. """ def __init__(self, H = 0): super(TranverseFieldIsingModel, self).__init__() self.H = H def set_hamiltonian(self, system): """Sets a system Hamiltonian to the TFIM Hamiltonian. Does exactly this. If the system hamiltonian has some other terms on it, they are not touched. So be sure to use this function only in newly created `System` objects. Parameters ---------- system : a System. The System you want to set the Hamiltonian for. """ system.clear_hamiltonian() if 'bh' in system.left_block.operators.keys(): system.add_to_hamiltonian(left_block_op='bh')<|fim▁hole|> if 'bh' in system.right_block.operators.keys(): system.add_to_hamiltonian(right_block_op='bh') system.add_to_hamiltonian('id', 'id', 's_z', 's_z', -1.) system.add_to_hamiltonian('id', 's_z', 's_z', 'id', -1.) system.add_to_hamiltonian('s_z', 's_z', 'id', 'id', -1.) system.add_to_hamiltonian('id', 'id', 'id', 's_x', self.H) system.add_to_hamiltonian('id', 'id', 's_x', 'id', self.H) system.add_to_hamiltonian('id', 's_x', 'id', 'id', self.H) system.add_to_hamiltonian('s_x', 'id', 'id', 'id', self.H) def set_block_hamiltonian(self, tmp_matrix_for_bh, system): """Sets the block Hamiltonian to be what you need for TFIM. Parameters ---------- tmp_matrix_for_bh : a numpy array of ndim = 2. An auxiliary matrix to keep track of the result. system : a System. The System you want to set the Hamiltonian for. """ # If you have a block hamiltonian in your block, add it if 'bh' in system.growing_block.operators.keys(): system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'bh', 'id') system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_z', 's_z', -1.) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 'id', 's_x', self.H) system.add_to_block_hamiltonian(tmp_matrix_for_bh, 's_x', 'id', self.H) def set_operators_to_update(self, system): """Sets the operators to update to be what you need for TFIM. Parameters ---------- system : a System. The System you want to set the Hamiltonian for. Notes ----- The block Hamiltonian, although it needs to be updated, is treated separately by the very functions in the `System` class. """ system.add_to_operators_to_update('s_z', site_op='s_z') system.add_to_operators_to_update('s_x', site_op='s_x')<|fim▁end|>
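A minimal usage sketch for the model class above (`build_system` stands in for however the surrounding DMRG package constructs its `System` objects; it is assumed, not defined here):

from tfi_model import TranverseFieldIsingModel

system = build_system()                  # assumed helper from the surrounding DMRG package
model = TranverseFieldIsingModel(H=1.0)  # h = 1 is the critical point of the 1D TFIM
model.set_hamiltonian(system)            # superblock Hamiltonian over the four operator slots
model.set_operators_to_update(system)    # keep s_z and s_x available on the growing block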
<|file_name|>analysis.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
# proxy module from __future__ import absolute_import from codetools.blocks.analysis import *
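The completion above is the whole point of the file: a backwards-compatibility shim that re-exports a relocated implementation. A sketch of the same pattern with illustrative module names:

# old_package/analysis.py -- keeps `from old_package.analysis import foo`
# working after the implementation moved to new_package.analysis.
from __future__ import absolute_import
from new_package.analysis import *  # noqa: F401,F403 -- intentional re-export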
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod brainfuck; mod optimizer; use std::fmt::{Display, Formatter, Result};<|fim▁hole|>pub enum BasicCmd { Skip, Rewind, Add, Sub, } impl Display for BasicCmd { fn fmt(&self, f: &mut Formatter) -> Result { write!(f, "{:?}", self) } } // workaround because FaustCmd::Repeatable is 'not a valid type' #[derive(Clone, PartialEq, Debug)] pub struct Repeatable(BasicCmd, usize); impl Display for Repeatable { fn fmt(&self, f: &mut Formatter) -> Result { write!(f, "{:?}", self) } } #[derive(Clone, PartialEq, Debug)] pub enum FaustCmd { Repeatable(BasicCmd, usize), Addressed(Repeatable, usize), Clear, Output, Input, JumpEqualZero, JumpNotZero, DebugPrint, Breakpoint, ToggleBuffer, Buffer(String), // Iteration primitives For(Vec<FaustCmd>, usize), // loop and reduce by usize ScanFwd(Vec<FaustCmd>, usize), // do something and skip by usize ScanBk(Vec<FaustCmd>, usize), // do something and rewind by usize } impl Display for FaustCmd { fn fmt(&self, f: &mut Formatter) -> Result { write!(f, "<{:?}>", self) } } trait Frontend { fn basic(&self, code: &String) -> Vec<FaustCmd>; fn optimize(&self, code: &String) -> Vec<FaustCmd> { optimizer::full_optimize(self.basic(code)) } }<|fim▁end|>
#[derive(Clone, PartialEq, Debug)]
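A minimal sketch of implementing the `Frontend` trait above for plain Brainfuck (the character mapping is illustrative; the real frontend lives in the `brainfuck` submodule and the default `optimize` method then runs the optimizer over this output):

struct PlainBrainfuck;

impl Frontend for PlainBrainfuck {
    fn basic(&self, code: &String) -> Vec<FaustCmd> {
        code.chars()
            .filter_map(|c| match c {
                '>' => Some(FaustCmd::Repeatable(BasicCmd::Skip, 1)),
                '<' => Some(FaustCmd::Repeatable(BasicCmd::Rewind, 1)),
                '+' => Some(FaustCmd::Repeatable(BasicCmd::Add, 1)),
                '-' => Some(FaustCmd::Repeatable(BasicCmd::Sub, 1)),
                '.' => Some(FaustCmd::Output),
                ',' => Some(FaustCmd::Input),
                '[' => Some(FaustCmd::JumpEqualZero),
                ']' => Some(FaustCmd::JumpNotZero),
                _ => None, // everything else is a Brainfuck comment
            })
            .collect()
    }
}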
<|file_name|>sql.py<|end_file_name|><|fim▁begin|>""" Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. """ from contextlib import contextmanager from datetime import date, datetime, time from functools import partial import re from typing import Iterator, Optional, Union, overload import warnings import numpy as np import pandas._libs.lib as lib from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.core.tools.datetimes import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass # ----------------------------------------------------------------------------- # -- Helper functions _SQLALCHEMY_INSTALLED = None def _is_sqlalchemy_connectable(con): global _SQLALCHEMY_INSTALLED if _SQLALCHEMY_INSTALLED is None: try: import sqlalchemy _SQLALCHEMY_INSTALLED = True except ImportError: _SQLALCHEMY_INSTALLED = False if _SQLALCHEMY_INSTALLED: import sqlalchemy # noqa: F811 return isinstance(con, sqlalchemy.engine.Connectable) else: return False def _convert_params(sql, params): """Convert SQL and params args to DBAPI2.0 compliant format.""" args = [sql] if params is not None: if hasattr(params, "keys"): # test if params is a mapping args += [params] else: args += [list(params)] return args def _process_parse_dates_argument(parse_dates): """Process parse_dates argument for read_sql functions""" # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] elif not hasattr(parse_dates, "__iter__"): parse_dates = [parse_dates] return parse_dates def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors="ignore", **format) else: # Allow passing of formatting string for integers # GH17855 if format is None and ( issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer) ): format = "s" if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]: return to_datetime(col, errors="coerce", unit=format, utc=utc) elif is_datetime64tz_dtype(col.dtype): # coerce to UTC timezone # GH11216 return to_datetime(col, utc=True) else: return to_datetime(col, errors="coerce", format=format, utc=utc) def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns. 
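For example (illustrative column names)::

    _parse_date_columns(frame, ["created"])   # let to_datetime infer the format
    _parse_date_columns(frame, {"ts": "s"})   # integer column of epoch seconds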
""" parse_dates = _process_parse_dates_argument(parse_dates) # we want to coerce datetime64_tz dtypes for now to UTC # we could in theory do a 'nice' conversion from a FixedOffset tz # GH11216 for col_name, df_col in data_frame.items(): if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None): """Wrap result set of query in a DataFrame.""" frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float) frame = _parse_date_columns(frame, parse_dates) if index_col is not None: frame.set_index(index_col, inplace=True) return frame def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connectable(engine/connection) or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by the library. If a DBAPI2 object, only sqlite3 is supported. cur : deprecated, cursor is obtained from connection, default: None params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable """ if cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) # ----------------------------------------------------------------------------- # -- Read and write to DataFrames @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_table( table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. 
chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP """ con = _engine_builder(con) if not _is_sqlalchemy_connectable(con): raise NotImplementedError( "read_sql_table only supported for SQLAlchemy connectable." ) import sqlalchemy from sqlalchemy.schema import MetaData meta = MetaData(con, schema=schema) try: meta.reflect(only=[table_name], views=True) except sqlalchemy.exc.InvalidRequestError as err: raise ValueError(f"Table {table_name} not found") from err pandas_sql = SQLDatabase(con, meta=meta) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) if table is not None: return table else: raise ValueError(f"Table {table_name} not found", con) @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql_query( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable(engine/connection), database str URI, or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. 
Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC. """ pandas_sql = pandasSQL_builder(con) return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) @overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: None = None, ) -> DataFrame: ... @overload def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: int = 1, ) -> Iterator[DataFrame]: ... def read_sql( sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: Optional[int] = None, ) -> Union[DataFrame, Iterator[DataFrame]]: """ Read SQL query or database table into a DataFrame. This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (for backward compatibility). It will delegate to the specific function depending on the provided input. A SQL query will be routed to ``read_sql_query``, while a database table name will be routed to ``read_sql_table``. Note that the delegated function might have more specific notes about their functionality not listed here. Parameters ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con : SQLAlchemy connectable (engine/connection) or database str URI or DBAPI2 connection (fallback mode). Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table (only used when reading a table). chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql_query : Read SQL query into a DataFrame. 
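    Examples
    --------
    A minimal sketch, assuming a local SQLite database ``example.db`` that
    already contains a table named ``data``:

    >>> import sqlite3
    >>> con = sqlite3.connect("example.db")  # doctest:+SKIP
    >>> pd.read_sql("SELECT col1, col2 FROM data", con)  # doctest:+SKIP

    Passing a table name instead of a query requires a SQLAlchemy
    connectable or database URI:

    >>> pd.read_sql("data", "sqlite:///example.db")  # doctest:+SKIP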
""" pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, SQLiteDatabase): return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) try: _is_table_name = pandas_sql.has_table(sql) except Exception: # using generic exception to catch errors from sql drivers (GH24988) _is_table_name = False if _is_table_name: pandas_sql.meta.reflect(only=[sql]) return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) else: return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, ) def to_sql( frame, name, con, schema=None, if_exists="fail", index=True, index_label=None, chunksize=None, dtype=None, method=None, ) -> None: """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame, Series name : str Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 fallback mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). - 'multi': Pass multiple values in a single ``INSERT`` clause. - callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. versionadded:: 0.24.0 """ if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") pandas_sql = pandasSQL_builder(con, schema=schema) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError( "'frame' argument should be either a Series or a DataFrame" ) pandas_sql.to_sql( frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, chunksize=chunksize, dtype=dtype, method=method, ) def has_table(table_name, con, schema=None): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. 
If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name) table_exists = has_table def _engine_builder(con): """ Returns a SQLAlchemy engine from a URI (if con is a string) else it just returns con without modifying it. """ global _SQLALCHEMY_INSTALLED if isinstance(con, str): try: import sqlalchemy except ImportError: _SQLALCHEMY_INSTALLED = False else: con = sqlalchemy.create_engine(con) return con return con def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. con = _engine_builder(con) if _is_sqlalchemy_connectable(con): return SQLDatabase(con, schema=schema, meta=meta) elif isinstance(con, str): raise ImportError("Using URI string without sqlalchemy installed.") else: return SQLiteDatabase(con, is_cursor=is_cursor) class SQLTable(PandasObject): """ For mapping Pandas tables to SQL tables. Uses the fact that the table is reflected by SQLAlchemy to do better type conversions. Also holds various flags needed to avoid having to pass them between functions all the time. """ # TODO: support for multiIndex def __init__( self, name, pandas_sql_engine, frame=None, index=True, if_exists="fail", prefix="pandas", index_label=None, schema=None, keys=None, dtype=None, ): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label) self.schema = schema self.if_exists = if_exists self.keys = keys self.dtype = dtype if frame is not None: # We want to initialize based on a dataframe self.table = self._create_table_setup() else: # no data provided, read-only mode self.table = self.pd_sql.get_table(self.name, self.schema) if self.table is None: raise ValueError(f"Could not init table '{name}'") def exists(self): return self.pd_sql.has_table(self.name, self.schema) def sql_schema(self): from sqlalchemy.schema import CreateTable return str(CreateTable(self.table).compile(self.pd_sql.connectable)) def _execute_create(self): # Inserting table into database, add to MetaData object self.table = self.table.tometadata(self.pd_sql.meta) self.table.create() def create(self): if self.exists(): if self.if_exists == "fail": raise ValueError(f"Table '{self.name}' already exists.") elif self.if_exists == "replace": self.pd_sql.drop_table(self.name, self.schema) self._execute_create() elif self.if_exists == "append": pass else: raise ValueError(f"'{self.if_exists}' is not valid for if_exists") else: self._execute_create() def _execute_insert(self, conn, keys, data_iter): """ Execute SQL statement inserting data Parameters ---------- conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection keys : list of str Column names data_iter : generator of list Each item contains a list of values to be inserted """ data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(), data) def _execute_insert_multi(self, conn, keys, data_iter): """ Alternative to _execute_insert for DBs that support multivalue INSERT. Note: multi-value insert is usually faster for analytics DBs and tables containing a few columns but performance degrades quickly with increase of columns. 
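        For example, instead of emitting one statement per row::

            INSERT INTO tbl (a, b) VALUES (1, 'x');
            INSERT INTO tbl (a, b) VALUES (2, 'y');

        a single multi-value statement is emitted (illustrative SQL; the
        actual statement is rendered by SQLAlchemy)::

            INSERT INTO tbl (a, b) VALUES (1, 'x'), (2, 'y');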
""" data = [dict(zip(keys, row)) for row in data_iter] conn.execute(self.table.insert(data)) def insert_data(self): if self.index is not None: temp = self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError(f"duplicate name in index/columns: {err}") from err else: temp = self.frame column_names = list(map(str, temp.columns)) ncols = len(column_names) data_list = [None] * ncols for i, (_, ser) in enumerate(temp.items()): vals = ser._values if vals.dtype.kind == "M": d = vals.to_pydatetime() elif vals.dtype.kind == "m": # store as integers, see GH#6921, GH#7076 d = vals.view("i8").astype(object) else: d = vals.astype(object) assert isinstance(d, np.ndarray), type(d) if ser._can_hold_na: # Note: this will miss timedeltas since they are converted to int mask = isna(d) d[mask] = None data_list[i] = d return column_names, data_list def insert(self, chunksize=None, method=None): # set insert method if method is None: exec_insert = self._execute_insert elif method == "multi": exec_insert = self._execute_insert_multi elif callable(method): exec_insert = partial(method, self) else: raise ValueError(f"Invalid parameter `method`: {method}") keys, data_list = self.insert_data() nrows = len(self.frame) if nrows == 0: return if chunksize is None: chunksize = nrows elif chunksize == 0: raise ValueError("chunksize argument should be non-zero") chunks = int(nrows / chunksize) + 1 with self.pd_sql.run_transaction() as conn: for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list]) exec_insert(conn, keys, chunk_iter) def _query_iterator( self, result, chunksize, columns, coerce_float=True, parse_dates=None ): """Return generator through chunked result set.""" while True: data = result.fetchmany(chunksize) if not data: break else: self.frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) yield self.frame def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None): if columns is not None and len(columns) > 0: from sqlalchemy import select cols = [self.table.c[n] for n in columns] if self.index is not None: for idx in self.index[::-1]: cols.insert(0, self.table.c[idx]) sql_select = select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) column_names = result.keys() if chunksize is not None: return self._query_iterator( result, chunksize, column_names, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = result.fetchall() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float ) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): # for writing: index=True to include index in sql table if index is True: nlevels = self.frame.index.nlevels # if index_label is specified, set this as index name(s) if index_label is not None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise ValueError( "Length of 'index_label' should match number of " f"levels, which is {nlevels}" ) else: return index_label # return the used column labels for the index columns if ( nlevels == 1 and "index" not in 
self.frame.columns and self.frame.index.name is None ): return ["index"] else: return [ l if l is not None else f"level_{i}" for i, l in enumerate(self.frame.index.names) ] # for reading: index=(list of) string to specify column to set as index elif isinstance(index, str): return [index] elif isinstance(index, list): return index else: return None def _get_column_names_and_types(self, dtype_mapper): column_names_and_types = [] if self.index is not None: for i, idx_label in enumerate(self.index): idx_type = dtype_mapper(self.frame.index._get_level_values(i)) column_names_and_types.append((str(idx_label), idx_type, True)) column_names_and_types += [ (str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) for i in range(len(self.frame.columns)) ] return column_names_and_types def _create_table_setup(self): from sqlalchemy import Table, Column, PrimaryKeyConstraint column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) columns = [ Column(name, typ, index=is_index) for name, typ, is_index in column_names_and_types ] <|fim▁hole|> if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk") columns.append(pkc) schema = self.schema or self.pd_sql.meta.schema # At this point, attach to new metadata, only attach to self.meta # once table is created. from sqlalchemy.schema import MetaData meta = MetaData(self.pd_sql, schema=schema) return Table(self.name, meta, *columns, schema=schema) def _harmonize_columns(self, parse_dates=None): """ Make the DataFrame's column types align with the SQL table column types. Need to work around limited NA value support. Floats are always fine, ints must always be floats if there are Null values. Booleans are hard because converting bool column with None replaces all Nones with false. Therefore only convert bool if there are no NA values. Datetimes should already be converted to np.datetime64 if supported, but here we also force conversion if required. """ parse_dates = _process_parse_dates_argument(parse_dates) for sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] # Handle date parsing upfront; don't try to convert columns # twice if col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column(df_col, format=fmt) continue # the type the dataframe column should have col_type = self._get_dtype(sql_col.type) if ( col_type is datetime or col_type is date or col_type is DatetimeTZDtype ): # Convert tz-aware Datetime SQL columns to UTC utc = col_type is DatetimeTZDtype self.frame[col_name] = _handle_date_column(df_col, utc=utc) elif col_type is float: # floats support NA, can always convert! self.frame[col_name] = df_col.astype(col_type, copy=False) elif len(df_col) == df_col.count(): # No NA values, can convert ints and bools if col_type is np.dtype("int64") or col_type is bool: self.frame[col_name] = df_col.astype(col_type, copy=False) except KeyError: pass # this column not in results def _sqlalchemy_type(self, col): dtype = self.dtype or {} if col.name in dtype: return self.dtype[col.name] # Infer type of column, while ignoring missing values. # Needed for inserting typed data containing NULLs, GH 8778. 
col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import ( BigInteger, Integer, Float, Text, Boolean, DateTime, Date, Time, TIMESTAMP, ) if col_type == "datetime64" or col_type == "datetime": # GH 9086: TIMESTAMP is the suggested type if the column contains # timezone information try: if col.dt.tz is not None: return TIMESTAMP(timezone=True) except AttributeError: # The column is actually a DatetimeIndex # GH 26761 or an Index with date-like data e.g. 9999-01-01 if getattr(col, "tz", None) is not None: return TIMESTAMP(timezone=True) return DateTime if col_type == "timedelta64": warnings.warn( "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, stacklevel=8, ) return BigInteger elif col_type == "floating": if col.dtype == "float32": return Float(precision=23) else: return Float(precision=53) elif col_type == "integer": if col.dtype == "int32": return Integer else: return BigInteger elif col_type == "boolean": return Boolean elif col_type == "date": return Date elif col_type == "time": return Time elif col_type == "complex": raise ValueError("Complex datatypes not supported") return Text def _get_dtype(self, sqltype): from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP if isinstance(sqltype, Float): return float elif isinstance(sqltype, Integer): # TODO: Refine integer size. return np.dtype("int64") elif isinstance(sqltype, TIMESTAMP): # we have a timezone capable type if not sqltype.timezone: return datetime return DatetimeTZDtype elif isinstance(sqltype, DateTime): # Caution: np.datetime64 is also a subclass of np.number. return datetime elif isinstance(sqltype, Date): return date elif isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject): """ Subclasses Should define read_sql and to_sql. """ def read_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) def to_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy " "connectable or sqlite connection" ) class SQLDatabase(PandasSQL): """ This class enables conversion between DataFrame and SQL databases using SQLAlchemy to handle DataBase abstraction. Parameters ---------- engine : SQLAlchemy connectable Connectable to connect with the database. Using SQLAlchemy makes it possible to use any DB supported by that library. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). meta : SQLAlchemy MetaData object, default None If provided, this MetaData object is used instead of a newly created. This allows to specify database flavor specific arguments in the MetaData object. """ def __init__(self, engine, schema=None, meta=None): self.connectable = engine if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.connectable, schema=schema) self.meta = meta @contextmanager def run_transaction(self): with self.connectable.begin() as tx: if hasattr(tx, "execute"): yield tx else: yield self.connectable def execute(self, *args, **kwargs): """Simple passthrough to SQLAlchemy connectable""" return self.connectable.execution_options(no_parameters=True).execute( *args, **kwargs ) def read_table( self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None, schema=None, chunksize=None, ): """ Read SQL database table into a DataFrame. 
Parameters ---------- table_name : string Name of SQL table in database. index_col : string, optional, default: None Column to set as index. coerce_float : boolean, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg}``, where the arg corresponds to the keyword arguments of :func:`pandas.to_datetime`. Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table. schema : string, default None Name of SQL schema in database to query (if database flavor supports this). If specified, this overwrites the default schema of the SQL database object. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query """ table = SQLTable(table_name, self, index=index_col, schema=schema) return table.read( coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, ) @staticmethod def _query_iterator( result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = result.fetchmany(chunksize) if not data: break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None, chunksize=None, ): """ Read SQL query into a DataFrame. Parameters ---------- sql : string SQL query to be executed. index_col : string, optional, default: None Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'} parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. Returns ------- DataFrame See Also -------- read_sql_table : Read SQL database table into a DataFrame. 
read_sql

        """
        args = _convert_params(sql, params)

        result = self.execute(*args)
        columns = result.keys()

        if chunksize is not None:
            return self._query_iterator(
                result,
                chunksize,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
        else:
            data = result.fetchall()
            frame = _wrap_result(
                data,
                columns,
                index_col=index_col,
                coerce_float=coerce_float,
                parse_dates=parse_dates,
            )
            return frame

    read_sql = read_query

    def to_sql(
        self,
        frame,
        name,
        if_exists="fail",
        index=True,
        index_label=None,
        schema=None,
        chunksize=None,
        dtype=None,
        method=None,
    ):
        """
        Write records stored in a DataFrame to a SQL database.

        Parameters
        ----------
        frame : DataFrame
        name : string
            Name of SQL table.
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, do nothing.
            - replace: If table exists, drop it, recreate it, and insert data.
            - append: If table exists, insert data. Create if does not exist.
        index : boolean, default True
            Write DataFrame index as a column.
        index_label : string or sequence, default None
            Column label for index column(s). If None is given (default) and
            `index` is True, then the index names are used.
            A sequence should be given if the DataFrame uses MultiIndex.
        schema : string, default None
            Name of SQL schema in database to write to (if database flavor
            supports this). If specified, this overwrites the default
            schema of the SQLDatabase object.
        chunksize : int, default None
            If not None, then rows will be written in batches of this
            size at a time. If None, all rows will be written at once.
        dtype : single type or dict of column name to SQL type, default None
            Optional specifying the datatype for columns. The SQL type should
            be a SQLAlchemy type. If all columns are of the same type, one
            single value can be used.
        method : {None, 'multi', callable}, default None
            Controls the SQL insertion clause used:

            * None : Uses standard SQL ``INSERT`` clause (one per row).
            * 'multi': Pass multiple values in a single ``INSERT`` clause.
            * callable with signature ``(pd_table, conn, keys, data_iter)``.

            Details and a sample callable implementation can be found in the
            section :ref:`insert method <io.sql.method>`.

            .. versionadded:: 0.24.0
        """
        if dtype and not is_dict_like(dtype):
            dtype = {col_name: dtype for col_name in frame}

        if dtype is not None:
            from sqlalchemy.types import to_instance, TypeEngine

            for col, my_type in dtype.items():
                if not isinstance(to_instance(my_type), TypeEngine):
                    raise ValueError(f"The type of {col} is not a SQLAlchemy type")

        table = SQLTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            schema=schema,
            dtype=dtype,
        )
        table.create()
        table.insert(chunksize, method=method)
        if not name.isdigit() and not name.islower():
            # check for potentially case sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
            engine = self.connectable.engine
            with self.connectable.connect() as conn:
                table_names = engine.table_names(
                    schema=schema or self.meta.schema, connection=conn
                )
            if name not in table_names:
                msg = (
                    f"The provided table name '{name}' is not found exactly as "
                    "such in the database after writing the table, possibly "
                    "due to case sensitivity issues. Consider using lower "
                    "case table names."
) warnings.warn(msg, UserWarning) @property def tables(self): return self.meta.tables def has_table(self, name, schema=None): return self.connectable.run_callable( self.connectable.dialect.has_table, name, schema or self.meta.schema ) def get_table(self, table_name, schema=None): schema = schema or self.meta.schema if schema: tbl = self.meta.tables.get(".".join([schema, table_name])) else: tbl = self.meta.tables.get(table_name) # Avoid casting double-precision floats into decimals from sqlalchemy import Numeric for column in tbl.columns: if isinstance(column.type, Numeric): column.type.asdecimal = False return tbl def drop_table(self, table_name, schema=None): schema = schema or self.meta.schema if self.has_table(table_name, schema): self.meta.reflect(only=[table_name], schema=schema) self.get_table(table_name, schema).drop() self.meta.clear() def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): table = SQLTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype ) return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- # sqlite-specific sql strings and handler class # dictionary used for readability purposes _SQL_TYPES = { "string": "TEXT", "floating": "REAL", "integer": "INTEGER", "datetime": "TIMESTAMP", "date": "DATE", "time": "TIME", "boolean": "INTEGER", } def _get_unicode_name(name): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError as err: raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname def _get_valid_sqlite_name(name): # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ # -for-sqlite-table-column-names-in-python # Ensure the string can be encoded as UTF-8. # Ensure the string does not include any NUL characters. # Replace all " with "". # Wrap the entire thing in double quotes. uname = _get_unicode_name(name) if not len(uname): raise ValueError("Empty table or column name specified") nul_index = uname.find("\x00") if nul_index >= 0: raise ValueError("SQLite identifier cannot contain NULs") return '"' + uname.replace('"', '""') + '"' _SAFE_NAMES_WARNING = ( "The spaces in these column names will not be changed. " "In pandas versions < 0.14, spaces were converted to underscores." ) class SQLiteTable(SQLTable): """ Patch the SQLTable for fallback support. Instead of a table variable just use the Create Table statement. """ def __init__(self, *args, **kwargs): # GH 8341 # register an adapter callable for datetime.time object import sqlite3 # this will transform time(12,34,56,789) into '12:34:56.000789' # (this is what sqlalchemy does) sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f")) super().__init__(*args, **kwargs) def sql_schema(self): return str(";\n".join(self.table)) def _execute_create(self): with self.pd_sql.run_transaction() as conn: for stmt in self.table: conn.execute(stmt) def insert_statement(self, *, num_rows): names = list(map(str, self.frame.columns)) wld = "?" 
# wildcard char
        escape = _get_valid_sqlite_name

        if self.index is not None:
            for idx in self.index[::-1]:
                names.insert(0, idx)

        bracketed_names = [escape(column) for column in names]
        col_names = ",".join(bracketed_names)

        row_wildcards = ",".join([wld] * len(names))
        wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
        insert_statement = (
            f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
        )
        return insert_statement

    def _execute_insert(self, conn, keys, data_iter):
        data_list = list(data_iter)
        conn.executemany(self.insert_statement(num_rows=1), data_list)

    def _execute_insert_multi(self, conn, keys, data_iter):
        data_list = list(data_iter)
        flattened_data = [x for row in data_list for x in row]
        conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)

    def _create_table_setup(self):
        """
        Return a list of SQL statements that creates a table reflecting the
        structure of a DataFrame.  The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
        """
        column_names_and_types = self._get_column_names_and_types(self._sql_type_name)

        pat = re.compile(r"\s+")
        column_names = [col_name for col_name, _, _ in column_names_and_types]
        if any(map(pat.search, column_names)):
            warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)

        escape = _get_valid_sqlite_name

        create_tbl_stmts = [
            escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
        ]

        if self.keys is not None and len(self.keys):
            if not is_list_like(self.keys):
                keys = [self.keys]
            else:
                keys = self.keys
            cnames_br = ", ".join(escape(c) for c in keys)
            create_tbl_stmts.append(
                f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
            )

        create_stmts = [
            "CREATE TABLE "
            + escape(self.name)
            + " (\n"
            + ",\n  ".join(create_tbl_stmts)
            + "\n)"
        ]

        ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
        if len(ix_cols):
            cnames = "_".join(ix_cols)
            cnames_br = ",".join(escape(c) for c in ix_cols)
            create_stmts.append(
                "CREATE INDEX "
                + escape("ix_" + self.name + "_" + cnames)
                + " ON "
                + escape(self.name)
                + " ("
                + cnames_br
                + ")"
            )

        return create_stmts

    def _sql_type_name(self, col):
        dtype = self.dtype or {}
        if col.name in dtype:
            return dtype[col.name]

        # Infer type of column, while ignoring missing values.
        # Needed for inserting typed data containing NULLs, GH 8778.
        col_type = lib.infer_dtype(col, skipna=True)

        if col_type == "timedelta64":
            warnings.warn(
                "the 'timedelta' type is not supported, and will be "
                "written as integer values (ns frequency) to the database.",
                UserWarning,
                stacklevel=8,
            )
            col_type = "integer"

        elif col_type == "datetime64":
            col_type = "datetime"

        elif col_type == "empty":
            col_type = "string"

        elif col_type == "complex":
            raise ValueError("Complex datatypes not supported")

        if col_type not in _SQL_TYPES:
            col_type = "string"

        return _SQL_TYPES[col_type]


class SQLiteDatabase(PandasSQL):
    """
    Version of SQLDatabase to support SQLite connections (fallback without
    SQLAlchemy). This should only be used internally.
Parameters ---------- con : sqlite connection object """ def __init__(self, con, is_cursor=False): self.is_cursor = is_cursor self.con = con @contextmanager def run_transaction(self): cur = self.con.cursor() try: yield cur self.con.commit() except Exception: self.con.rollback() raise finally: cur.close() def execute(self, *args, **kwargs): if self.is_cursor: cur = self.con else: cur = self.con.cursor() try: cur.execute(*args, **kwargs) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: # pragma: no cover ex = DatabaseError( f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback" ) raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}") raise ex from exc @staticmethod def _query_iterator( cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None ): """Return generator through chunked result set""" while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() break else: yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) def read_query( self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize=None, ): args = _convert_params(sql, params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc in cursor.description] if chunksize is not None: return self._query_iterator( cursor, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) else: data = self._fetchall_as_list(cursor) cursor.close() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, ) return frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql( self, frame, name, if_exists="fail", index=True, index_label=None, schema=None, chunksize=None, dtype=None, method=None, ): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: string Name of SQL table. if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. index : boolean, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Ignored parameter included for compatibility with SQLAlchemy version of ``to_sql``. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a string. If all columns are of the same type, one single value can be used. method : {None, 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. .. 
versionadded:: 0.24.0
        """
        if dtype and not is_dict_like(dtype):
            dtype = {col_name: dtype for col_name in frame}

        if dtype is not None:
            for col, my_type in dtype.items():
                if not isinstance(my_type, str):
                    raise ValueError(f"{col} ({my_type}) not a string")

        table = SQLiteTable(
            name,
            self,
            frame=frame,
            index=index,
            if_exists=if_exists,
            index_label=index_label,
            dtype=dtype,
        )
        table.create()
        table.insert(chunksize, method)

    def has_table(self, name, schema=None):
        # TODO(wesm): unused?
        # escape = _get_valid_sqlite_name
        # esc_name = escape(name)

        wld = "?"
        query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"

        return len(self.execute(query, [name]).fetchall()) > 0

    def get_table(self, table_name, schema=None):
        return None  # not supported in fallback mode

    def drop_table(self, name, schema=None):
        drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
        self.execute(drop_sql)

    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
        table = SQLiteTable(
            table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
        )
        return str(table.sql_schema())


def get_schema(frame, name, keys=None, con=None, dtype=None):
    """
    Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string
        name of SQL table
    keys : string or sequence, default: None
        columns to use as a primary key
    con: an open SQL database connection object or a SQLAlchemy connectable
        Using SQLAlchemy makes it possible to use any DB supported by that
        library, default: None
        If a DBAPI2 object, only sqlite3 is supported.
    dtype : dict of column name to SQL type, default None
        Optional specifying the datatype for columns. The SQL type should
        be a SQLAlchemy type, or a string for sqlite3 fallback connection.
    """
    pandas_sql = pandasSQL_builder(con=con)
    return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)<|fim▁end|>
if self.keys is not None:
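Taken together, the pandas internals excerpted above back the public to_sql/read_sql round trip. A minimal sketch of that round trip over the sqlite3 fallback path (no SQLAlchemy installed); the table name "demo" and the sample frame are illustrative assumptions:

import sqlite3

import pandas as pd

# A DBAPI2 connection: pandasSQL_builder() wraps it in SQLiteDatabase,
# the fallback class shown above.
conn = sqlite3.connect(":memory:")
frame = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

# Routes through SQLiteDatabase.to_sql -> SQLiteTable.create()/insert();
# method="multi" selects the single multi-row INSERT path
# (_execute_insert_multi) instead of executemany().
frame.to_sql("demo", conn, index=False, if_exists="replace", method="multi")

# With a DBAPI2 connection, read_sql() always dispatches to
# SQLiteDatabase.read_query(); params follow sqlite3's "?" paramstyle.
print(pd.read_sql("SELECT a, b FROM demo WHERE a > ?", conn, params=(1,)))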
<|file_name|>make_ready_future.hpp<|end_file_name|><|fim▁begin|>#pragma once #include <agency/detail/config.hpp> #include <agency/detail/requires.hpp> #include <agency/future.hpp> #include <agency/execution/executor/executor_traits/executor_future.hpp> #include <agency/execution/executor/executor_traits/is_executor.hpp> #include <utility><|fim▁hole|> namespace agency { namespace detail { template<class Executor, class T, class... Args> struct has_make_ready_future_impl { template< class Executor2, typename = decltype( std::declval<Executor2&>().template make_ready_future<T>( std::declval<Args>()... ) ) > static std::true_type test(int); template<class> static std::false_type test(...); using type = decltype(test<Executor>(0)); }; template<class Executor, class T, class... Args> using has_make_ready_future = typename has_make_ready_future_impl<Executor,T,Args...>::type; // this overload handles the case of executors which have the member function .make_ready_future() __agency_exec_check_disable__ template<class T, class Executor, class... Args> __AGENCY_ANNOTATION executor_future_t<Executor,T> make_ready_future_impl(std::true_type, Executor& exec, Args&&... args) { return exec.template make_ready_future<T>(std::forward<Args>(args)...); } // end make_ready_future_impl() // this overload handles the case of executors which do not have the member function .make_ready_future() template<class T, class Executor, class... Args> __AGENCY_ANNOTATION executor_future_t<Executor,T> make_ready_future_impl(std::false_type, Executor&, Args&&... args) { using future_type = executor_future_t<Executor,T>; return future_traits<future_type>::template make_ready<T>(std::forward<Args>(args)...); } // end make_ready_future_impl() } // end detail template<class T, class E, class... Args, __AGENCY_REQUIRES(detail::Executor<E>()) > __AGENCY_ANNOTATION executor_future_t<E,T> make_ready_future(E& exec, Args&&... args) { using check_for_member_function = detail::has_make_ready_future< E, T, Args&&... >; return detail::make_ready_future_impl<T>(check_for_member_function(), exec, std::forward<Args>(args)...); } // end make_ready_future() } // end agency<|fim▁end|>
#include <type_traits>
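The agency header above detects at compile time whether an executor provides its own make_ready_future() member and falls back to future_traits otherwise. As a rough Python analogue of that detect-and-dispatch pattern, with hypothetical stand-in classes rather than agency's real API:

class ImmediateFuture:
    # hypothetical already-completed future used by the fallback path
    def __init__(self, value):
        self._value = value

    def get(self):
        return self._value


class CustomExecutor:
    # plays the role of an executor with its own make_ready_future member
    def make_ready_future(self, value):
        return ImmediateFuture(value)


class PlainExecutor:
    # no make_ready_future member, so the generic fallback is used
    pass


def make_ready_future(executor, value):
    # hasattr() stands in for the has_make_ready_future SFINAE check
    if hasattr(executor, "make_ready_future"):
        return executor.make_ready_future(value)
    return ImmediateFuture(value)  # like future_traits<...>::make_ready


print(make_ready_future(CustomExecutor(), 1).get())  # 1, member path
print(make_ready_future(PlainExecutor(), 2).get())   # 2, fallback path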
<|file_name|>__manifest__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>{ "name": "Account Invoice Payment Retention", "version": "14.0.1.0.1", "category": "Accounting & Finance", "author": "Ecosoft, Odoo Community Association (OCA)", "license": "AGPL-3", "website": "https://github.com/OCA/account-invoicing", "depends": ["account"], "data": [ "security/security.xml", "views/res_config_settings_views.xml", "views/account_move_views.xml", "wizard/account_payment_register_views.xml", ], "maintainer": ["kittiu"], "installable": True, "development_status": "Alpha", }<|fim▁end|>
# Copyright 2020 Ecosoft Co., Ltd. (http://ecosoft.co.th) # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
<|file_name|>package.py<|end_file_name|><|fim▁begin|># Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyOnnx(PythonPackage): """Open Neural Network Exchange (ONNX) is an open ecosystem that empowers AI developers to choose the right tools as their project evolves. ONNX provides an open source format for AI models, both deep learning and traditional ML. It defines an extensible computation graph model, as well as definitions of<|fim▁hole|> pypi = "Onnx/onnx-1.6.0.tar.gz" version('1.6.0', sha256='3b88c3fe521151651a0403c4d131cb2e0311bd28b753ef692020a432a81ce345') version('1.5.0', sha256='1a584a4ef62a6db178c257fffb06a9d8e61b41c0a80bfd8bcd8a253d72c4b0b4') depends_on('py-setuptools', type='build') # Protobuf version limit is due to https://github.com/protocolbuffers/protobuf/pull/8794 depends_on('protobuf@:3.17') depends_on('py-protobuf+cpp@:3.17', type=('build', 'run')) depends_on('py-numpy', type=('build', 'run')) depends_on('py-six', type=('build', 'run')) depends_on('[email protected]:', when='^python@:3.4', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', type='build') depends_on('py-pytest-runner', type='build') # 'python_out' does not recognize dllexport_decl. patch('remove_dllexport_decl.patch', when='@:1.6.0')<|fim▁end|>
built-in operators and standard data types. Currently we focus on the capabilities needed for inferencing (scoring).""" homepage = "https://github.com/onnx/onnx"
<|file_name|>.doxie.render.toc.js<|end_file_name|><|fim▁begin|>// from https://gist.github.com/mathewbyrne/1280286 slugify = function(text){ return text.toString().toLowerCase() .replace(/\s+/g, '-') // Replace spaces with - .replace(/[^\w\-]+/g, '') // Remove all non-word chars .replace(/\-\-+/g, '-') // Replace multiple - with single - .replace(/^-+/, '') // Trim - from start of text .replace(/-+$/, ''); // Trim - from end of text }<|fim▁hole|> var out = '* [' + data.title + '](https://github.com/git-tips/tips#' + slugify(data.title) + ')\n'; return out; }; module.exports = render;<|fim▁end|>
var render = function(data) { var data = data.data;
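For comparison, the renderer's slugify chain ports to Python almost mechanically; the sample input below is an illustrative assumption:

import re

def slugify(text):
    text = str(text).lower()
    text = re.sub(r"\s+", "-", text)       # Replace spaces with -
    text = re.sub(r"[^\w\-]+", "", text)   # Remove all non-word chars
    text = re.sub(r"\-\-+", "-", text)     # Replace multiple - with single -
    text = re.sub(r"^-+", "", text)        # Trim - from start of text
    text = re.sub(r"-+$", "", text)        # Trim - from end of text
    return text

assert slugify("Keep your repo clean!") == "keep-your-repo-clean"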
<|file_name|>problem_2.py<|end_file_name|><|fim▁begin|>''' Problem 2 @author: Kevin Ji ''' def sum_even_fibonacci( max_value ): # Initial two elements prev_term = 1 cur_term = 2 temp_sum = 2<|fim▁hole|> prev_term = cur_term cur_term = next_term if cur_term % 2 == 0: temp_sum += cur_term return temp_sum print( sum_even_fibonacci( 4000000 ) )<|fim▁end|>
while cur_term < max_value: next_term = prev_term + cur_term
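Splicing the completion above (the while header and next_term update) back into the prompt's hole yields this runnable whole; the expected value 4613732 is the well-known Project Euler #2 answer, included as a sanity check:

def sum_even_fibonacci(max_value):
    # Initial two elements
    prev_term = 1
    cur_term = 2
    temp_sum = 2
    while cur_term < max_value:
        next_term = prev_term + cur_term
        prev_term = cur_term
        cur_term = next_term
        if cur_term % 2 == 0:
            temp_sum += cur_term
    return temp_sum

assert sum_even_fibonacci(4000000) == 4613732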
<|file_name|>ex3.py<|end_file_name|><|fim▁begin|>"""
example using 'calls_expected' in addtoBatch
"""
import batchOpenMPI

def f_mult(x) : 
<|fim▁hole|>f.addtoBatch(4,calls_expected=4)
batchOpenMPI.processBatch() #get the workers to calculate all the inputs
res = [f(4),f(4),f(4)]
print(res)

#another test
f.addtoBatch(1)
batchOpenMPI.processBatch() #get the workers to calculate all the inputs
res = f(1), f(1)

batchOpenMPI.end_MPI_loop(print_stats=True) #releases workers

print("*** jobs executed by workers should be 2 ,(5 calls made),jobs uncollected should = 1, jobs_master=1")<|fim▁end|>
return x*2.0 f = batchOpenMPI.batchFunction(f_mult) #creating function wrapper batchOpenMPI.begin_MPI_loop() # both the workers and the master process run the same code up until here
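The ex3.py record relies on batchOpenMPI's queue-then-collect contract: addtoBatch(x, calls_expected=n) precomputes n results for x, processBatch() runs the queued work on MPI workers, and later calls to the wrapper collect those results (extra calls fall back to the master). A serial stand-in sketching only that contract, not batchOpenMPI's real implementation:

class BatchFunction:
    # hypothetical serial analogue of batchOpenMPI.batchFunction
    def __init__(self, fn):
        self.fn = fn
        self.pending = []   # (input, calls_expected) queued via addtoBatch
        self.results = {}   # input -> stack of precomputed results

    def addtoBatch(self, x, calls_expected=1):
        self.pending.append((x, calls_expected))

    def processBatch(self):
        # batchOpenMPI would farm this loop out to MPI worker processes
        for x, n in self.pending:
            self.results.setdefault(x, []).extend([self.fn(x)] * n)
        self.pending = []

    def __call__(self, x):
        if self.results.get(x):
            return self.results[x].pop()  # collect a precomputed result
        return self.fn(x)                 # uncollected input: computed on the master

f = BatchFunction(lambda x: x * 2.0)
f.addtoBatch(4, calls_expected=4)
f.processBatch()
print([f(4), f(4), f(4)])  # [8.0, 8.0, 8.0]; one precomputed result left over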
<|file_name|>system.rs<|end_file_name|><|fim▁begin|>extern crate racer;
extern crate rand;

use racer::{complete_from_file, find_definition, Match, MatchType, Coordinate};

use std::io::Write;
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::thread;

/// A temporary file that is removed on drop
///
/// With the new constructor, you provide contents and a file is created based on the name of the
/// current task. The with_path constructor allows you to choose a name. Neither form is secure,
/// and both are subject to race conditions.
pub struct TmpFile {
    path_buf: PathBuf
}

impl TmpFile {
    /// Create a temp file with random name and `contents`.
    pub fn new(contents: &str) -> TmpFile {
        let tmp = TmpFile {
            path_buf: PathBuf::from(tmpname())
        };

        tmp.write_contents(contents);
        tmp
    }

    /// Create a file with `name` and `contents`.
    pub fn with_path<P: AsRef<Path>>(name: P, contents: &str) -> TmpFile {
        let tmp = TmpFile {
            path_buf: name.as_ref().to_path_buf()
        };

        tmp.write_contents(contents);
        tmp
    }

    fn write_contents(&self, contents: &str) {
        File::create(self.path()).unwrap().write_all(contents.as_bytes()).unwrap();
    }

    /// Get the Path of the TmpFile
    pub fn path(&self) -> &Path {
        self.path_buf.as_path()
    }
}

/// Make path for tmpfile
fn tmpname() -> String {
    use rand::Rng;

    let thread = thread::current();
    let taskname = thread.name().unwrap();
    let mut p = String::from("tmpfile.") + &taskname.replace("::", "_");
    // Add some random chars
    for c in ::rand::thread_rng().gen_ascii_chars().take(5) {
        p.push(c);
    }

    p
}

impl Drop for TmpFile {
    fn drop(&mut self) {
        if self.path().exists() {
            if let Err(e) = fs::remove_file(self.path()) {
                println!("could not remove tmpfile {}: {:?}", self.path().display(), e);
            }
        }
    }
}

pub struct TmpDir {
    path_buf: PathBuf
}

impl TmpDir {
    pub fn new() -> TmpDir {
        TmpDir::with_path(tmpname())
    }

    pub fn with_path<P: AsRef<Path>>(name: P) -> TmpDir {
        let pb = PathBuf::from(name.as_ref());
        fs::create_dir_all(&pb).unwrap();

        TmpDir {
            path_buf: pb
        }
    }

    /// Create new file with name in the directory
    pub fn write_file<P: AsRef<Path>>(&self, name: P, contents: &str) -> PathBuf {
        let name = self.path_buf.join(name);
        File::create(&name).unwrap().write_all(contents.as_bytes()).unwrap();
        name
    }

    pub fn path(&self) -> &Path {
        self.path_buf.as_path()
    }
}

impl Drop for TmpDir {
    fn drop(&mut self) {
        if self.path().exists() {
            if let Err(e) = fs::remove_dir_all(self.path()) {
                println!("could not remove tmpdir {}: {:?}", self.path().display(), e);
            }
        }
    }
}

fn get_pos_and_source(src: &str) -> (usize, String) {
    let point = src.find('~').unwrap();
    (point, src.replace('~', ""))
}

/// Return the completions for the given source.
///
/// The point to find completions at must be marked with '~'.
fn get_all_completions(src: &str, dir: Option<TmpDir>) -> Vec<Match> {
    let dir = dir.unwrap_or_else(|| TmpDir::new());
    let (completion_point, clean_src) = get_pos_and_source(src);
    let path = dir.write_file("src.rs", &clean_src);
    let cache = racer::FileCache::default();
    let session = racer::Session::new(&cache);

    complete_from_file(&path, completion_point, &session).collect()
}

/// Return the first completion for the given source.
fn get_one_completion(src: &str, dir: Option<TmpDir>) -> Match {
    get_all_completions(src, dir).swap_remove(0)
}

/// Return the first completion for the given source, which must be
/// the only one.
fn get_only_completion(src: &str, dir: Option<TmpDir>) -> Match { let mut all = get_all_completions(src, dir); assert_eq!(all.len(), 1); all.pop().unwrap() } /// Return the definition for the given source. /// /// The point to find the definition at must be marked with '~'. fn get_definition(src: &str, dir: Option<TmpDir>) -> Match { let dir = dir.unwrap_or_else(|| TmpDir::new()); let (completion_point, clean_src) = get_pos_and_source(src); let path = dir.write_file("src.rs", &clean_src); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); find_definition(&path, completion_point, &session).unwrap() } #[test] fn completes_fn() { let src = " fn apple() { } fn main() { let b = ap~ }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); } #[test] fn finds_fn_docs() { let src = " /// Orange /// juice fn apple() { } fn main() { apple~ }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); assert_eq!("Orange\njuice", got.docs); } #[test] fn finds_struct_docs() { let src = " /// Orange /// juice struct Apple { } fn main() { Apple~ }"; let got = get_one_completion(src, None); assert_eq!("Apple", got.matchstr); assert_eq!("Orange\njuice", got.docs); } #[test] fn completes_fn_with_substitute_file() { let src = " fn apple() { } fn main() { let b = ap~ }"; let (_pos, src) = get_pos_and_source(src); let cache = racer::FileCache::default(); let real_file = Path::new("not_real.rs"); let session = racer::Session::new(&cache); session.cache_file_contents(&real_file, src); let cursor = Coordinate { line: 6, column: 18 }; let got = complete_from_file(real_file, cursor, &session).nth(0).unwrap(); assert_eq!(Some(Coordinate { line: 2, column: 8 }), got.coords); assert_eq!("apple", got.matchstr); } #[test] fn completes_pub_fn_locally() { let src = " pub fn apple() { } fn main() { let b = ap~ }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); } #[test] fn completes_pub_fn_locally_precached() { let src = " pub fn apple() { } fn main() { let b = ap~ }"; let (pos, src) = get_pos_and_source(src); let f = TmpFile::new(&src); let path = f.path(); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); session.cache_file_contents(&path, src.clone()); let got = complete_from_file(&path, pos, &session).nth(0).unwrap(); assert_eq!("apple", got.matchstr); } #[test] fn completes_pub_fn_from_local_package() { let src = " extern crate fixtures; use fixtures::foo; fn main() { let x = foo::~ } "; let got = get_one_completion(src, None); assert_eq!("test", got.matchstr); } #[test] fn completes_pub_fn_from_local_submodule_package() { let src = " extern crate fixtures; use fixtures::bar; fn main() { let x = bar::~ } "; let got = get_one_completion(src, None); assert_eq!("bartest", got.matchstr); } #[test] fn completes_pub_const_fn_locally() { let src = " pub const fn apple() { } fn main() { let b = ap~ }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); } #[test] fn completes_local_scope_let() { let src = " fn main() { let apple = 35; let b = ap~ }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); assert_eq!(29, got.point); } #[test] fn completes_via_parent_scope_let() { let src = " fn main() { let mut apple = 35; if foo { let b = ap~ } }"; let got = get_one_completion(src, None); assert_eq!("apple", got.matchstr); assert_eq!(33, got.point); } #[test] fn completes_for_vec_field_and_method() { let modsrc = " pub trait IntoIterator { type 
Item; type IntoIter: Iterator<Item=Self::Item>; fn into_iter(self) -> Self::IntoIter; } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter(mut self) -> IntoIter<T> {} } pub struct IntoIter<T> {} impl<T> Iterator for IntoIter<T> { type Item = T; fn next(&mut self) -> Option<T> {} } pub struct Vec<T> {} pub enum Option<T> { None, Some(T) } "; let src = " pub mod mymod; use mymod::{Vec, IntoIter, IntoIterator, Option}; use Option::{Some, None}; struct St { stfield: i32, } impl St { pub fn stmethod(&self) -> u32 {2} } fn main() { let mut arr: Vec<St> = Vec::new(); arr.push( St{stfield: 4} ); for it in arr { it.stf it.stm } } "; let dir = TmpDir::new(); dir.write_file("mymod.rs", modsrc); let path = dir.write_file("src.rs", src); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); let cursor1 = Coordinate { line: 22, column: 18 }; let got1 = complete_from_file(&path, cursor1, &session).nth(0).unwrap(); println!("{:?}", got1); assert_eq!("stfield", got1.matchstr); let cursor2 = Coordinate { line: 23, column: 18 }; let got2 = complete_from_file(&path, cursor2, &session).nth(0).unwrap(); println!("{:?}", got2); assert_eq!("stmethod", got2.matchstr); } #[test] fn completes_trait_methods() { let src = " mod sub { pub trait Trait { fn traitf() -> bool; fn traitm(&self) -> bool; } pub struct Foo(pub bool); impl Trait for Foo { fn traitf() -> bool { false } fn traitm(&self) -> bool { true } } } fn main() { // l16 let t = sub::Foo(true); sub::Foo:: t.t } "; let f = TmpFile::new(src); let path = f.path(); let cache1 = racer::FileCache::default(); let session1 = racer::Session::new(&cache1); let cursor1 = Coordinate { line: 18, column: 18}; let got1 = complete_from_file(&path, cursor1, &session1).nth(0).unwrap(); let cache2 = racer::FileCache::default(); let session2 = racer::Session::new(&cache2); let cursor2 = Coordinate { line: 19, column: 11}; let got2 = complete_from_file(&path, cursor2, &session2).nth(0).unwrap(); println!("{:?}", got1); println!("{:?}", got2); assert_eq!(got1.matchstr, "traitf"); assert_eq!(got2.matchstr, "traitm"); assert_eq!(got1.contextstr, "fn traitf() -> bool"); assert_eq!(got2.contextstr, "fn traitm(&self) -> bool"); } #[test] fn completes_trait_bounded_methods() { let src = " pub trait Trait1 {} impl Trait1 for Foo {} pub trait Trait2 { fn traitf() -> bool; fn traitm(&self) -> bool; } impl<T: Trait1> Trait2 for T { fn traitf() -> bool { true } fn traitm(&self) -> bool { false } } pub struct Foo(pub bool); fn main() { let t = Foo(true); Foo::tra t.tr }"; let f = TmpFile::new(src); let path = f.path(); let cache1 = racer::FileCache::default(); let session1 = racer::Session::new(&cache1); let cursor1 = Coordinate { line: 20, column: 16 }; let got1 = complete_from_file(&path, cursor1, &session1).nth(0).unwrap(); let cache2 = racer::FileCache::default(); let session2 = racer::Session::new(&cache2); let cursor2 = Coordinate { line: 21, column: 12 }; let got2 = complete_from_file(&path, cursor2, &session2).nth(0).unwrap(); println!("{:?}", got1); println!("{:?}", got2); assert_eq!(got1.matchstr, "traitf"); assert_eq!(got2.matchstr, "traitm"); assert_eq!(got1.contextstr, "fn traitf() -> bool"); assert_eq!(got2.contextstr, "fn traitm(&self) -> bool"); } #[test] fn completes_trait_bounded_methods_generic_return() { let src = " pub trait Trait1 { fn traitfn(&self) -> u32 { 2 } } impl Trait1 for Foo {} pub trait Trait2 { fn traitm(self) -> Self; } impl<T: Trait1> Trait2 for T { fn traitm(self) -> T { self } } pub 
struct Foo(pub bool); impl Foo { pub fn structfn(&self) -> bool {self.0} } fn main() { let t = Foo(true); t.traitm().struc t.traitm().traitf }"; let f = TmpFile::new(src); let path = f.path(); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); let cursor1 = Coordinate { line: 24, column: 24 }; let cursor2 = Coordinate { line: 25, column: 25 }; let got1 = complete_from_file(&path, cursor1, &session).nth(0).unwrap(); let got2 = complete_from_file(&path, cursor2, &session).nth(0).unwrap(); println!("{:?}", got1); println!("{:?}", got2); assert_eq!(got1.matchstr, "structfn"); assert_eq!(got2.matchstr, "traitfn"); } #[test] fn completes_iter_variable_methods() { let modsrc = " pub trait Iterator { type Item; fn next(&mut self) -> Option<Self::Item>; } pub trait IntoIterator { type Item; type IntoIter: Iterator<Item=Self::Item>; fn into_iter(self) -> Self::IntoIter; } impl<I: Iterator> IntoIterator for I { type Item = I::Item; type IntoIter = I; fn into_iter(self) -> I { self } } impl<T> Iterator for IntoIter<T> { type Item = T; fn next(&mut self) -> Option<T> {} } pub enum Option<T> { None, Some(T) } "; let src = " pub mod mymod; use mymod::{Iterator, Option}; use Option::{Some, None}; struct St { pub item: StItem, pub used: bool } struct StItem { pub field: u32 } impl Iterator for St { type Item: StItem; fn next(&mut self) -> Option<StItem> { if self.used { self.used = false; return Some(self.item); } None } } fn main() { let it = St { text: StItem { field: 22 }, used: false } for item in it { item.fie~ } } "; let dir = TmpDir::new(); dir.write_file("mymod.rs", modsrc); let got = get_one_completion(src, Some(dir)); assert_eq!(got.matchstr, "field"); } #[test] fn completes_for_vec_iter_field_and_method() { let modsrc = " pub trait Iterator { type Item; fn next(&mut self) -> Option<Self::Item>; } pub trait IntoIterator { type Item; type IntoIter: Iterator<Item=Self::Item>; fn into_iter(self) -> Self::IntoIter; } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter(mut self) -> IntoIter<T> {} } pub struct IntoIter<T> {} impl<T> Iterator for IntoIter<T> { type Item = T; fn next(&mut self) -> Option<T> {} } impl<I: Iterator> IntoIterator for I { type Item = I::Item; type IntoIter = I; fn into_iter(self) -> I { self } } pub struct Vec<T> {} pub enum Option<T> { None, Some(T) } "; let src = " pub mod mymod; use mymod::{Vec, IntoIter, IntoIterator, Option}; use Option::{Some, None}; struct St { stfield: i32, } impl St { pub fn stmethod(&self) -> u32 {2} } fn main() { let mut arr: Vec<St> = Vec::new(); arr.push( St{stfield: 4} ); for it in arr.iter() { it.stf it.stm } } "; let dir = TmpDir::new(); dir.write_file("mymod.rs", modsrc); let path = dir.write_file("src.rs", src); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); let cursor1 = Coordinate { line: 22, column: 18 }; let got1 = complete_from_file(&path, cursor1, &session).nth(0).unwrap(); println!("{:?}", got1); assert_eq!("stfield", got1.matchstr); let cursor2 = Coordinate { line: 23, column: 18 }; let got2 = complete_from_file(&path, cursor2, &session).nth(0).unwrap(); println!("{:?}", got2); assert_eq!("stmethod", got2.matchstr); } #[test] fn completes_trait_methods_when_at_scope_end() { let src = " mod sub { pub trait Trait { fn traitf() -> bool; fn traitm(&self) -> bool; } impl Trait for Foo { fn traitf() -> bool { false } fn traitm(&self) -> bool { true } } pub struct Foo(pub bool); } fn main() { // l16 let t = sub::Foo(true); sub::Foo:: 
t.t } "; let f = TmpFile::new(src); let path = f.path(); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); let cursor1 = Coordinate { line: 18, column: 18 }; let got1 = complete_from_file(&path, cursor1, &session).nth(0).unwrap(); let cursor2 = Coordinate { line: 19, column: 11 }; let got2 = complete_from_file(&path, cursor2, &session).nth(0).unwrap(); println!("{:?}", got1); println!("{:?}", got2); assert_eq!(got1.matchstr, "traitf"); assert_eq!(got2.matchstr, "traitm"); assert_eq!(got1.contextstr, "fn traitf() -> bool"); assert_eq!(got2.contextstr, "fn traitm(&self) -> bool"); } #[test] fn follows_use() { let src1 = " pub fn myfn() {} pub fn foo() {} "; let src = " use src1::{foo,myfn}; mod src1; fn main() { myfn~(); } "; let dir = TmpDir::new(); dir.write_file("src1.rs", src1); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "myfn"); assert_eq!(got.contextstr, "pub fn myfn()"); } #[test] fn follows_use_as() { let src2 = " pub fn myfn() {} pub fn foo() {} "; let src = " use src2::myfn as myfoofn; mod src2; fn main() { my~foofn(); } "; let dir = TmpDir::new(); dir.write_file("src2.rs", src2); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "myfn"); } #[test] fn follows_use_glob() { let src3 = " pub fn myfn() {} pub fn foo() {} "; let src = " use src3::*; mod src3; fn main() { my~fn(); } "; let dir = TmpDir::new(); dir.write_file("src3.rs", src3); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "myfn"); } #[test] fn follows_multiple_use_globs() { let src1 = " pub fn src1fn() {} "; let src2 = " pub fn src2fn() {} "; let src =" use multiple_glob_test1::*; use multiple_glob_test2::*; mod multiple_glob_test1; mod multiple_glob_test2; src~ "; let dir = TmpDir::new(); dir.write_file("multiple_glob_test1.rs", src1); dir.write_file("multiple_glob_test2.rs", src2); let mut has_1 = false; let mut has_2 = false; let completions = get_all_completions(src, Some(dir)); for m in completions { if m.matchstr == "src1fn" { has_1 = true; } if m.matchstr == "src2fn" { has_2 = true; } } assert!(has_1 && has_2); } #[test] fn finds_external_mod_docs() { let src1 = "// Copyright notice //! The mods multiline //! 
documentation "; let src = " mod external_mod; use external_mod; fn main() { external_mod~ } "; let dir = TmpDir::new(); dir.write_file("external_mod.rs", src1); let got = get_one_completion(src, Some(dir)); assert_eq!("external_mod", got.matchstr); assert_eq!("The mods multiline\ndocumentation", got.docs); } #[test] fn finds_external_struct_docs() { let src1 = " /// Orange /// juice pub struct Apple { pub a: u8, }"; let src = " use external_struct::Apple; mod external_struct; fn main() { Apple~ }"; let dir = TmpDir::new(); dir.write_file("external_struct.rs", src1); let got = get_one_completion(src, Some(dir)); assert_eq!("Apple", got.matchstr); assert_eq!("Orange\njuice", got.docs); } #[test] fn finds_external_fn_docs() { let src1 = " /// Orange /// juice pub fn apple() { let x = 1; }"; let src = " use external_fn::apple; mod external_fn; fn main() { apple~ }"; let dir = TmpDir::new(); dir.write_file("external_fn.rs", src1); let got = get_one_completion(src, Some(dir)); assert_eq!("apple", got.matchstr); assert_eq!("Orange\njuice", got.docs); } #[test] fn follows_use_local_package() { let src = " extern crate fixtures; use fixtures::~ "; let got = get_one_completion(src, None); assert_eq!(got.matchstr, "foo"); } #[test] fn completes_struct_field_via_assignment() { let src = " struct Point { first: f64, second: f64 } let var = Point {first: 35, second: 22}; var.f~ "; let got = get_one_completion(src, None); assert_eq!(got.matchstr, "first"); } #[test] fn finds_defn_of_struct_field() { let src = " struct Point { first: f64, second: f64 } let var = Point {first: 35, second: 22}; var.f~irst "; let got = get_definition(src, None); assert_eq!(got.matchstr, "first"); } #[test] fn finds_impl_fn() { let src = " struct Foo; impl Foo { fn new() {} } Foo::n~ew(); "; let got = get_definition(src, None); assert_eq!(got.matchstr, "new"); } #[test] fn follows_use_to_inline_mod() { let src = " use foo::myfn; mod foo { pub fn myfn() {} } fn main() { m~yfn(); } "; let got = get_definition(src, None); assert_eq!(got.matchstr, "myfn"); } #[test] fn struct_field_scalar_primitive_types() { let src = " struct Foo<'a> { reference: &'a u8, array: [u8; 5], slice: &'a [u8], } fn foo(x: Foo) { x.~ } "; let completions = get_all_completions(src, None); assert_eq!(completions.len(), 3); for completion in completions { println!("match: {:?}", completion); let expected = match completion.matchstr.as_ref() { "reference" => "&u8", "array" => "[u8; 5]", "slice" => "&[u8]", _ => panic!("unexpected match from Foo struct ({})", completion.matchstr) }; assert_eq!(completion.contextstr, expected); } } #[test] fn finds_enum() { let src = " enum MyEnum { One, Two } fn myfn(e: M~yEnum) {} "; let got = get_definition(src, None); assert_eq!(got.matchstr, "MyEnum"); } #[test] fn finds_type() { let src = " type SpannedIdent = Spanned<Ident> S~pannedIdent; "; let got = get_definition(src, None); assert_eq!(got.matchstr, "SpannedIdent"); } #[test] fn finds_trait() { let src = " pub trait MyTrait<E: Clone> {} M~yTrait "; let got = get_definition(src, None); assert_eq!(got.matchstr, "MyTrait"); assert_eq!(got.contextstr, "pub trait MyTrait<E: Clone>"); } #[test] fn finds_macro() { let src = " macro_rules! 
my_macro { () => {} } m~y_macro!(); "; let got = get_definition(src, None); assert_eq!(got.matchstr, "my_macro!"); } #[test] fn finds_extern_crate() { let src = " extern crate fixtures; f~ixtures "; let got = get_definition(src, None); assert_eq!(got.matchstr, "fixtures"); } #[test] fn finds_fn_arg() { let src = " fn myfn(myarg: &str) { my~arg } "; let got = get_definition(src, None); assert_eq!(got.matchstr, "myarg"); } #[test] fn finds_fn_arg_in_incomplete_fn() { let src = " fn myfn(myarg: &str) { my~arg "; let got = get_definition(src, None); assert_eq!(got.matchstr, "myarg"); } #[test] fn finds_inline_fn() { let src = " #[inline] fn contains<'a>(&needle: &'a str) -> bool { } conta~ins(); "; let got = get_definition(src, None); assert_eq!(got.matchstr, "contains"); assert_eq!(got.contextstr, "fn contains<'a>(&needle: &'a str) -> bool"); } #[test] fn follows_self_use() { let modsrc = " pub use self::src4::{Foo,myfn}; pub mod src4; "; let src4 = " struct Foo; pub fn myfn() {} "; let src = " use mymod::{Foo,myfn}; pub mod mymod; fn main() { my~fn(); } "; let dir = TmpDir::new(); let mymod = TmpDir::with_path(dir.path().join("mymod")); mymod.write_file("mod.rs", modsrc); let src4path = mymod.write_file("src4.rs", src4); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "myfn"); assert_eq!(src4path, got.filepath); assert_eq!(28, got.point); } #[test] fn finds_nested_submodule_file() { let sub3src = " pub fn myfn() {} "; let src = " pub mod sub1 { pub mod sub2 { pub mod sub3; } } sub1::sub2::sub3::m~yfn(); "; let dir = TmpDir::new(); let sub2name = dir.path().join("sub1").join("sub2"); let _sub2dir = TmpDir::with_path(&sub2name); let src3 = TmpFile::with_path(&sub2name.join("sub3.rs"), sub3src); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "myfn"); assert_eq!(src3.path(), got.filepath); } #[test] fn follows_super_in_sub_module() { let src = " pub fn iamhere() { } mod inner { pub use super::ia~mhere; } "; let got = get_definition(src, None); assert_eq!("iamhere", got.matchstr); } #[test] fn follows_super_in_local_sub_module() { let src = " mod inner { pub fn iamhere() { } mod inner2 { pub use super::iamh~ere; } } "; let got = get_definition(src, None); assert_eq!("iamhere", got.matchstr); } #[test] fn follows_use_to_impl() { let modsrc = " pub struct Foo; impl Foo { // impl doesn't need to be 'pub' pub fn new() -> Foo { Foo } } "; let src = " use mymod::{Foo}; mod mymod; fn main() { Foo::n~ew(); } "; let dir = TmpDir::new(); let mod_path = dir.write_file("mymod.rs", modsrc); let got = get_definition(src, Some(dir)); assert_eq!(got.matchstr, "new"); assert_eq!(90, got.point); assert_eq!(mod_path, got.filepath); } #[test] fn finds_templated_impl_fn() { let src = " struct Foo<T>; impl<T> Foo<T> { fn new() {} } Foo::n~ew(); "; let got = get_definition(src, None); assert_eq!(got.matchstr, "new"); } #[test] fn follows_fn_to_method() { let src = " struct Foo<T>; impl<T> Foo<T> { fn new() -> Foo<T> {} fn mymethod(&self) {} } fn main() { let v = Foo::new(); v.my~ } "; let got = get_one_completion(src, None); assert_eq!("mymethod", got.matchstr); } #[test] fn simple_struct_contextstr() { let src = " struct Foo<T>; fn myfn() { let x: Foo~ } "; let got = get_one_completion(src, None); assert_eq!(got.contextstr, "struct Foo<T>;"); } #[test] fn struct_contextstr() { let src = " struct Foo<T> { pub fn foo1(); } fn myfn() { let x: Foo~ } "; let got = get_one_completion(src, None); assert_eq!(got.contextstr, "struct Foo<T>"); } #[test] fn follows_arg_to_method() { let src 
= " struct Foo<T>; impl<T> Foo<T> { fn mymethod(&self) {} } fn myfn(v: &Foo) { v.my~ } "; let got = get_one_completion(src, None); assert_eq!("mymethod", got.matchstr); } #[test] fn follows_arg_to_enum_method() { let src = " enum Foo<T> { EnumVal } impl<T> Foo<T> { fn mymethod(&self) {} } fn myfn(v: &Foo) { v.my~ } "; let got = get_one_completion(src, None); assert_eq!("mymethod", got.matchstr); } #[test] fn follows_let_method_call() { let src = " struct Foo; struct Bar; impl Foo { fn mymethod(&self) -> Bar {} } impl Bar { fn mybarmethod(&self) -> Bar {} } fn myfn(v: &Foo) { let f = v.mymethod(); f.my~ } "; let got = get_one_completion(src, None); assert_eq!("mybarmethod", got.matchstr); } #[test] fn follows_chained_method_call() { let src = " struct Foo; struct Bar; impl<T> Foo<T> {<|fim▁hole|> fn mybarmethod(&self) -> Bar {} } fn myfn(v: &Foo) { v.mymethod().my~ } "; let got = get_one_completion(src, None); assert_eq!("mybarmethod", got.matchstr); } #[test] fn follows_chained_method_call_returning_self() { let src = " struct Foo; impl Foo { fn mymethod(&self) {} fn new() -> Self {} } Foo::new().~ "; let got = get_only_completion(src, None); assert_eq!("mymethod", got.matchstr); } #[test] fn discards_inner_fns() { let src = " struct Foo; impl<T> Foo<T> { fn mymethod(&self) -> Bar { fn inner() { } } } fn myfn(v: &Foo) { v.i~ } "; let got = get_all_completions(src, None); assert!(got.is_empty(), "should not match inner function"); } #[test] fn differentiates_type_and_value_namespaces() { let src = " enum MyEnum{ Foo } struct Foo; impl Foo { pub fn new() -> Foo {} } let l = Foo::n~ew(); "; let got = get_definition(src, None); println!("{}", got.matchstr); println!("{:?}", got.mtype); assert_eq!("new", got.matchstr); } #[test] fn follows_self_to_method() { let src = " struct Foo; impl Bar for Foo { pub fn method(self) { } pub fn another_method(self, feio: uint) { self.met~hod() } }"; let got = get_definition(src, None); assert_eq!("method", got.matchstr); } #[test] #[ignore] fn follows_self_to_method_when_call_on_new_line() { let src = " struct Foo; impl Bar for Foo { pub fn method(self) -> Foo { } pub fn another_method(self, feio: uint) { self.method() .met~hod() } }"; let got = get_definition(src, None); assert_eq!("method", got.matchstr); } #[test] fn follows_self_to_trait_method() { let src = " trait Bar { pub fn method(self) { } pub fn another_method(self) { self.met~hod() } }"; let got = get_definition(src, None); assert_eq!("method", got.matchstr); } #[test] fn finds_trait_method() { let src = " pub trait MyTrait { fn op(self); fn trait_method(self){} } struct Foo; impl MyTrait for Foo { fn op(self) { self.trait~_method(); } }"; let got = get_definition(src, None); assert_eq!("trait_method", got.matchstr); } #[test] fn finds_field_type() { let src = " pub struct Blah { subfield: uint } pub struct Foo { myfield : Blah } let f = Foo{ myfield: Blah { subfield: 3}}; f.myfield.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_a_generic_retval_from_a_function() { let src = " pub struct Blah { subfield: uint } pub struct Foo<T> { myfield: T } fn myfn() -> Foo<Blah> {} myfn().myfield.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn handles_an_enum_option_style_return_type() { let src = " pub struct Blah { subfield: uint } pub enum MyOption<T> { MySome(T), MyNone } impl MyOption<T> { pub fn unwrap(&self) -> T {} } fn myfn() -> MyOption<Blah> {} let s = myfn(); s.unwrap().sub~field "; let 
got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_definition_of_const() { let src = " pub const MYCONST:uint = 3; MYC~ONST "; let got = get_definition(src, None); assert_eq!("MYCONST", got.matchstr); } #[test] fn finds_definition_of_static() { let src = " pub static MYSTATIC:uint = 3; MYS~TATIC "; let got = get_definition(src, None); assert_eq!("MYSTATIC", got.matchstr); } #[test] fn handles_dotdot_before_searchstr() { let src = " static MYLEN:uint = 30; let f = [0i32, ..M~YLEN]; "; let got = get_definition(src, None); assert_eq!("MYLEN", got.matchstr); } #[test] #[ignore] fn finds_definition_of_lambda_argument() { let src = " fn myfn(&|int|) {} myfn(|a|~a+3); "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); } #[test] fn finds_definition_of_let_tuple() { let src = " let (a, b) = (2,3); ~a "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); } #[test] fn finds_type_of_tuple_member_via_let_type() { let src = " pub struct Blah { subfield: uint } let (a, b): (uint, Blah); b.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_type_of_tuple_member_via_let_expr() { let src = " pub struct Blah { subfield: uint } let (a, b) = (3, Blah{subfield:3}); b.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_type_of_tuple_member_via_fn_retval() { let src = " pub struct Blah { subfield: uint } fn myfn() -> (uint, Blah) {} let (a, b) = myfn(); b.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_type_of_tuple_member_in_fn_arg() { let src = " pub struct Blah { subfield: uint } fn myfn(a: uint, (b, c): (uint, Blah)) { c.s~ubfield } "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_namespaced_enum_variant() { let src = " pub enum Blah { MyVariant } Blah::MyVa~riant "; let got = get_definition(src, None); assert_eq!("MyVariant", got.matchstr); } #[test] fn finds_glob_imported_enum_variant() { let src = " use self::Blah::*; pub enum Blah { MyVariant, MyVariant2 } MyVa~riant "; let got = get_definition(src, None); assert_eq!("MyVariant", got.matchstr); } #[test] #[ignore] fn uses_generic_arg_to_resolve_trait_method() { let src = " pub trait MyTrait { fn trait_method(self){} } pub fn doit<T:MyTrait>(stream: &mut T) { T.trait_met~hod } "; let got = get_definition(src, None); assert_eq!("trait_method", got.matchstr); } #[test] fn destructures_a_tuplestruct() { let src = " pub struct Blah { subfield: uint } pub struct TupleStruct(Blah); let TupleStruct(var) = TupleStruct(Blah{subfield:35}); var.su~bfield "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn destructures_a_tuplestruct_with_generic_arg() { let src = " pub struct Blah { subfield: uint } pub struct TupleStruct<T>(T); let a : TupleStruct<Blah> = TupleStruct(Blah{subfield:35}); let TupleStruct(var) = a; var.su~bfield "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_if_let_ident_defn() { let src = " if let MyOption(myvar) = myvar { myvar~ } "; let got = get_only_completion(src, None); assert_eq!("myvar", got.matchstr); } #[test] fn doesnt_find_if_let_if_not_in_the_subscope() { let src = " let myvar = 3u32; if let MyOption(myvar) = myvar { myvar } my~var "; let got = get_definition(src, None); assert_eq!("myvar", got.matchstr); assert_eq!(9, got.point); } #[test] fn 
finds_rebound_var_in_iflet() { let src = " let o: MyOption<Blah>; if let MyOption::MySome(o) = o { ~o } "; let got = get_definition(src, None); assert_eq!(56, got.point); } #[test] fn handles_if_let() { let src = " pub struct Blah { subfield: uint } pub enum MyOption<T> { MySome(T), MyNone } let o: MyOption<Blah>; if let MyOption::MySome(a) = o { a.sub~field } "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn handles_if_let_as_expression() { let src = " pub struct Blah { subfield: uint } pub enum MyOption<T> { MySome(T), MyNone } let o: MyOption<Blah>; let foo = if let MyOption::MySome(a) = o { // iflet is an expression a.sub~field }; "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn finds_match_arm_var() { let src = " match foo { Some(a) => ~a "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); } #[test] fn finds_match_arm_var_in_scope() { let src = " match foo { Some(a) => { ~a } "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); } #[test] fn finds_match_arm_enum() { let src = " enum MyEnum { Foo, Bar } match foo { MyEnum::Foo~ => 1, MyEnum::Bar => 2 "; let got = get_definition(src, None); assert_eq!("Foo", got.matchstr); } #[test] fn finds_match_arm_var_with_nested_match() { let src = " match foo { bar => {something} Some(a) => { let b = match blah { None => () } ~a } "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); } #[test] fn gets_type_via_match_arm() { let src = " pub struct Blah { subfield: uint } pub enum MyOption<T> { MySome(T), MyNone } let o: MyOption<Blah>; match o { MyOption::MySome(a) => a.subfi~eld "; let got = get_definition(src, None); assert_eq!("subfield", got.matchstr); } #[test] fn handles_default_arm() { let src = " let o: MyOption<Blah>; match o { Foo => { } _ => ~o } "; let got = get_definition(src, None); assert_eq!("o", got.matchstr); assert_eq!(9, got.point); } #[test] fn doesnt_match_rhs_of_let_in_same_stmt() { let src = " let a = 3; // <--- should match this 'a' let a = ~a + 2; // not this one "; let got = get_definition(src, None); assert_eq!("a", got.matchstr); assert_eq!(9, got.point); } #[test] fn finds_unsafe_fn() { let src = " unsafe fn foo() {} fn bar() { f~oo() } "; let got = get_definition(src, None); assert_eq!(got.matchstr, "foo"); assert_eq!(got.point, 15); } #[test] fn completes_methods_on_deref_type() { let modsrc = " pub trait Deref { type Target: ?Sized; fn deref(&self) -> &Self::Target; } pub struct B { c: C, } pub struct C; pub trait GetOne { fn one(&self) -> u32 { 1u32 } } impl GetOne for C {} impl Deref for B { type Target = C; fn deref(&self) -> &C { &self.c } } "; let src = " mod mymod; use mymod::{B, C, GetOne}; fn main() { let b: B = B{ c: C}; b.o~ } "; let dir = TmpDir::new(); dir.write_file("mymod.rs", modsrc); let got = get_one_completion(src, Some(dir)); assert_eq!(got.matchstr, "one"); } #[test] fn finds_self_param_when_fn_has_generic_closure_arg() { // issue #508 let src = " struct MyOption; impl MyOption { // needs to find 'self' here to see it is a method pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> { } } let a: MyOption; a.~map() "; let got = get_definition(src, None); assert_eq!("map", got.matchstr); } #[test] fn completes_methods_on_deref_generic_type() { let modsrc = " pub trait Deref { type Target: ?Sized; fn deref(&self) -> &Self::Target; } pub struct B<T> { c: T, } pub struct C; pub trait GetOne { fn one(&self) -> u32 { 1u32 } } impl GetOne for C {} impl<T> Deref for 
B<T> { type Target = T; fn deref(&self) -> &T { &self.c } } "; let src = " mod mymod; use mymod::{B, C, GetOne}; fn main() { let b: B<C> = B{ c: C}; b.o~ } "; let dir = TmpDir::new(); dir.write_file("mymod.rs", modsrc); let got = get_one_completion(src, Some(dir)); assert_eq!(got.matchstr, "one"); } #[test] fn completes_multiple_use_bracket() { // issue # 96 // wo: without bracket, wi: with bracket let modfile = " pub struct StarWars { pub Vadar: u8, }; pub struct StarTrek { pub Spock: u8, };"; let srcwo = " mod modfile1; use modfile1::~S "; let srcwi = " mod modfile1; use modfile1::{~S "; let dir = TmpDir::new(); dir.write_file("modfile1.rs", modfile); let gotwo = get_all_completions(srcwo, Some(dir)); let dir = TmpDir::new(); dir.write_file("modfile1.rs", modfile); let gotwi = get_all_completions(srcwi, Some(dir)); assert_eq!(gotwo.len(), gotwi.len()); for (wo, wi) in gotwo.into_iter().zip(gotwi) { assert_eq!(wo.matchstr, wi.matchstr); } } #[test] fn completes_multiple_use_comma() { // issue # 96 // wo: without comma, wi: with comma let modfile = " pub struct StarWars { pub Kenobi: u8, }; pub struct StarTrek { pub Spock: u8, };"; let srcwo = " mod modfile2; use modfile2::~S "; let srcwi = " mod modfile2; use modfile2::{StarWars, ~S "; let dir = TmpDir::new(); dir.write_file("modfile2.rs", modfile); let gotwo = get_all_completions(srcwo, Some(dir)); let dir = TmpDir::new(); dir.write_file("modfile2.rs", modfile); let gotwi = get_all_completions(srcwi, Some(dir)); assert_eq!(gotwo.len(), gotwi.len()); for (wo, wi) in gotwo.into_iter().zip(gotwi) { assert_eq!(wo.matchstr, wi.matchstr); } } #[test] fn completes_trait_methods_in_trait_impl() { let src = " mod sub { pub trait Trait { fn traitf() -> bool; fn traitm(&self) -> bool; } pub struct Foo(bool); impl Trait for Foo { fn traitf() -> bool { false } fn traitm(&self) -> bool { true } } } "; let f = TmpFile::new(src); let path = f.path(); let cache = racer::FileCache::default(); let session = racer::Session::new(&cache); let cursor = Coordinate { line: 11, column: 21 }; let got = complete_from_file(&path, cursor, &session).nth(0).unwrap(); assert_eq!(got.matchstr, "traitf"); assert_eq!(got.contextstr, "fn traitf() -> bool"); let cursor = Coordinate { line: 12, column: 21 }; let got = complete_from_file(&path, cursor, &session).nth(0).unwrap(); assert_eq!(got.matchstr, "traitm"); assert_eq!(got.contextstr, "fn traitm(&self) -> bool"); } #[test] fn finds_field_with_same_name_as_method() { let src = " struct Foo { same_name: uint } impl Foo { fn same_name(&self){} } let a: Foo; a.same_na~me; "; let got = get_definition(src, None); assert_eq!("same_name", got.matchstr); assert_eq!(MatchType::StructField, got.mtype); } #[test] fn finds_method_with_same_name_as_field() { let src = " struct Foo { same_name: uint } impl Foo { fn same_name(&self){}} let a: Foo; a.same_na~me(); "; let got = get_definition(src, None); assert_eq!("same_name", got.matchstr); assert_eq!(MatchType::Function, got.mtype); }<|fim▁end|>
fn mymethod(&self) -> Bar {}
    }

    impl<T> Bar<T> {
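Editor's note: a hedged reconstruction, not part of the original record (whitespace inferred). Splicing the completion above back into the <|fim▁hole|> position of the preceding Rust record restores the fixture that the chained-method-call test searches:

    impl<T> Foo<T> {
        fn mymethod(&self) -> Bar {}
    }

    impl<T> Bar<T> {
        fn mybarmethod(&self) -> Bar {}
    }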
<|file_name|>TestWinapp.py<|end_file_name|><|fim▁begin|># vim: ts=4:sw=4:expandtab # BleachBit # Copyright (C) 2008-2015 Andrew Ziem # http://bleachbit.sourceforge.net # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Test cases for module Winapp """ import os import shutil import sys import unittest sys.path.append('.') from bleachbit.Winapp import Winapp, detectos, detect_file, section2option from bleachbit.Windows import detect_registry_key import common if 'nt' == os.name: import _winreg else: def fake_detect_registry_key(f): return True import bleachbit.Windows bleachbit.Windows.detect_registry_key = fake_detect_registry_key def get_winapp2(): """Download and cache winapp2.ini. Return local filename.""" url = "http://www.winapp2.com/Winapp2.ini" tmpdir = None if 'posix' == os.name: tmpdir = '/tmp' if 'nt' == os.name: tmpdir = os.getenv('TMP') fn = os.path.join(tmpdir, 'bleachbit_test_winapp2.ini') if os.path.exists(fn): import time import stat age_seconds = time.time() - os.stat(fn)[stat.ST_MTIME] if age_seconds > (24 * 36 * 36): print 'note: deleting stale file %s ' % fn os.remove(fn) if not os.path.exists(fn): f = file(fn, 'w') import urllib2 txt = urllib2.urlopen(url).read() f.write(txt) return fn class WinappTestCase(unittest.TestCase): """Test cases for Winapp""" def run_all(self, cleaner, really_delete): """Test all the cleaner options""" for (option_id, __name) in cleaner.get_options(): for cmd in cleaner.get_commands(option_id): for result in cmd.execute(really_delete): common.validate_result(self, result, really_delete) def test_remote(self): """Test with downloaded file""" winapps = Winapp(get_winapp2()) for cleaner in winapps.get_cleaners(): self.run_all(cleaner, False) def test_detectos(self): """Test detectos function""" # Tests are in the format (required_ver, mock, expected_return) tests = (('5.1', '5.1', True), ('5.1', '6.0', False), ('6.0', '5.1', False), ('|5.1', '5.1', True), ('|5.1', '6.0', False), ('6.1|', '5.1', False), ('6.1|', '6.0', False), ('6.1|', '6.1', True), ('6.1|', '6.2', True), ('6.2|', '5.1', False), ('6.2|', '6.0', False), ('6.2|', '6.1', False), ('6.2|', '6.2', True)) for (s, mock, expected_return) in tests: actual_return = detectos(s, mock) self.assertEqual(expected_return, actual_return, 'detectos(%s, %s)==%s instead of %s' % (s, mock, actual_return, expected_return)) def test_detect_file(self): """Test detect_file function""" tests = [('%windir%\\system32\\kernel32.dll', True), ('%windir%\\system32', True), ('%ProgramFiles%\\Internet Explorer', True), ('%ProgramFiles%\\Internet Explorer\\', True), ('%windir%\\doesnotexist', False), ('%windir%\\system*', True), ('%windir%\\*ystem32', True), ('%windir%\\*ystem3*', True)] # On 64-bit Windows, Winapp2.ini expands the %ProgramFiles% environment # variable to also %ProgramW6432%, so test unique entries in # %ProgramW6432%. 
import struct if not 32 == 8 * struct.calcsize('P'): raise NotImplementedError('expecting 32-bit Python') if os.getenv('ProgramW6432'): dir_64 = os.listdir(os.getenv('ProgramFiles')) dir_32 = os.listdir(os.getenv('ProgramW6432')) dir_32_unique = set(dir_32) - set(dir_64) if dir_32 and not dir_32_unique: raise RuntimeError( 'Test expects objects in %ProgramW6432% not in %ProgramFiles%') for pathname in dir_32_unique: tests.append(('%%ProgramFiles%%\\%s' % pathname, True)) else: print 'NOTE: skipping %ProgramW6432% tests because WoW64 not detected' for (pathname, expected_return) in tests: actual_return = detect_file(pathname) msg = 'detect_file(%s) returned %s' % (pathname, actual_return) self.assertEqual(expected_return, actual_return, msg) def test_fake(self): """Test with fake file""" ini_fn = None keyfull = 'HKCU\\Software\\BleachBit\\DeleteThisKey' subkey = 'Software\\BleachBit\\DeleteThisKey\\AndThisKey' def setup_fake(f1_filename=None): """Setup the test environment"""<|fim▁hole|> dirname2 = os.path.join(dirname, 'sub') os.mkdir(dirname2) f2 = os.path.join(dirname2, 'deleteme.log') file(f2, 'w').write('') fbak = os.path.join(dirname, 'deleteme.bak') file(fbak, 'w').write('') self.assertTrue(os.path.exists(f1)) self.assertTrue(os.path.exists(f2)) self.assertTrue(os.path.exists(fbak)) hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, subkey) hkey.Close() self.assertTrue(detect_registry_key(keyfull)) self.assertTrue(detect_registry_key('HKCU\\%s' % subkey)) return (dirname, f1, f2, fbak) def ini2cleaner(filekey, do_next=True): ini = file(ini_fn, 'w') ini.write('[someapp]\n') ini.write('LangSecRef=3021\n') ini.write(filekey) ini.write('\n') ini.close() self.assertTrue(os.path.exists(ini_fn)) if do_next: return Winapp(ini_fn).get_cleaners().next() else: return Winapp(ini_fn).get_cleaners() # reuse this path to store a winapp2.ini file in import tempfile (ini_h, ini_fn) = tempfile.mkstemp(suffix='.ini', prefix='winapp2') os.close(ini_h) # a set of tests tests = [ # single file ('FileKey1=%s|deleteme.log', None, False, True, False, True, True, True), # special characters for XML ('FileKey1=%s|special_chars_&-\'.txt', 'special_chars_&-\'.txt', False, True, False, True, True, True), # *.log ('FileKey1=%s|*.LOG', None, False, True, False, True, True, True), # semicolon separates different file types ('FileKey1=%s|*.log;*.bak', None, False, True, False, True, False, True), # *.* ('FileKey1=%s|*.*', None, False, True, False, True, False, True), # recurse *.* ('FileKey1=%s|*.*|RECURSE', None, False, True, False, False, False, True), # remove self *.*, this removes the directory ('FileKey1=%s|*.*|REMOVESELF', None, False, False, False, False, False, True), ] # Add positive detection, where the detection believes the application is present, # to all the tests, which are also positive. 
new_tests = [] for test in tests: for detect in ( "\nDetectFile=%%APPDATA%%\\Microsoft", "\nDetectFile1=%%APPDATA%%\\Microsoft\nDetectFile2=%%APPDATA%%\\does_not_exist", "\nDetectFile1=%%APPDATA%%\\does_not_exist\nDetectFile2=%%APPDATA%%\\Microsoft", "\nDetect=HKCU\\Software\\Microsoft", "\nDetect1=HKCU\\Software\\Microsoft\nDetect2=HKCU\\Software\\does_not_exist", "\nDetect1=HKCU\\Software\\does_not_exist\nDetect2=HKCU\\Software\\Microsoft"): new_ini = test[0] + detect new_test = [new_ini, ] + [x for x in test[1:]] new_tests.append(new_test) positive_tests = tests + new_tests # execute positive tests for test in positive_tests: print 'positive test: ', test (dirname, f1, f2, fbak) = setup_fake(test[1]) cleaner = ini2cleaner(test[0] % dirname) self.assertEqual(test[2], cleaner.auto_hide()) self.run_all(cleaner, False) self.run_all(cleaner, True) self.assertEqual(test[3], os.path.exists(dirname)) self.assertEqual(test[4], os.path.exists(f1)) self.assertEqual(test[5], os.path.exists(f2)) self.assertEqual(test[6], os.path.exists(fbak)) self.assertEqual(test[7], cleaner.auto_hide()) shutil.rmtree(dirname, True) # negative tests where the application detect believes the application # is absent for test in tests: for detect in ( "\nDetectFile=c:\\does_not_exist", # special characters for XML "\nDetectFile=c:\\does_not_exist_special_chars_&'", "\nDetectFile1=c:\\does_not_exist1\nDetectFile2=c:\\does_not_exist2", "\nDetect=HKCU\\Software\\does_not_exist", "\nDetect=HKCU\\Software\\does_not_exist_&'", "\nDetect1=HKCU\\Software\\does_not_exist1\nDetect2=HKCU\\Software\\does_not_exist1"): new_ini = test[0] + detect t = [new_ini, ] + [x for x in test[1:]] print 'negative test', t # execute the test (dirname, f1, f2, fbak) = setup_fake() cleaner = ini2cleaner(t[0] % dirname, False) self.assertRaises(StopIteration, cleaner.next) # registry key, basic (dirname, f1, f2, fbak) = setup_fake() cleaner = ini2cleaner('RegKey1=%s' % keyfull) self.run_all(cleaner, False) self.assertTrue(detect_registry_key(keyfull)) self.run_all(cleaner, True) self.assertFalse(detect_registry_key(keyfull)) # check for parse error with ampersand (dirname, f1, f2, fbak) = setup_fake() cleaner = ini2cleaner('RegKey1=HKCU\\Software\\PeanutButter&Jelly') self.run_all(cleaner, False) self.run_all(cleaner, True) def test_section2option(self): """Test for section2option()""" tests = ((' FOO2 ', 'foo2'), ('A - B (C)', 'a_b_c')) for test in tests: self.assertEqual(section2option(test[0]), test[1]) def suite(): return unittest.makeSuite(WinappTestCase) if __name__ == '__main__': unittest.main()<|fim▁end|>
dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp')
            f1 = os.path.join(dirname, f1_filename or 'deleteme.log')
            file(f1, 'w').write('')
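Editor's note: a hedged reconstruction, not part of the original record (indentation inferred). With the completion above spliced into the <|fim▁hole|> of TestWinapp.py, the helper begins:

    def setup_fake(f1_filename=None):
        """Setup the test environment"""
        dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp')
        f1 = os.path.join(dirname, f1_filename or 'deleteme.log')
        file(f1, 'w').write('')
        dirname2 = os.path.join(dirname, 'sub')
        os.mkdir(dirname2)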
<|file_name|>ProductSize.java<|end_file_name|><|fim▁begin|>package uk.co.nevarneyok.entities.product;

import com.google.gson.annotations.SerializedName;

public class ProductSize {
    private long id;
    @SerializedName("remote_id")
    private long remoteId;
    private String value;

    public ProductSize() {
    }

    public ProductSize(long id, long remoteId, String value) {
        this.id = id;
        this.remoteId = remoteId;
        this.value = value;
    }

    public long getId() {
        return id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public long getRemoteId() {
        return remoteId;
    }

<|fim▁hole|>
        this.remoteId = remoteId;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        ProductSize that = (ProductSize) o;

        if (id != that.id) return false;
        if (remoteId != that.remoteId) return false;
        return !(value != null ? !value.equals(that.value) : that.value != null);
    }

    @Override
    public int hashCode() {
        int result = (int) (id ^ (id >>> 32));
        result = 31 * result + (int) (remoteId ^ (remoteId >>> 32));
        result = 31 * result + (value != null ? value.hashCode() : 0);
        return result;
    }

    @Override
    public String toString() {
        return "ProductSize{" +
                "id=" + id +
                ", remoteId=" + remoteId +
                ", value='" + value + '\'' +
                '}';
    }
}<|fim▁end|>
public void setRemoteId(long remoteId) {
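Editor's note: a hedged reconstruction, not part of the original record. The completion line above, spliced into the <|fim▁hole|> of ProductSize.java, restores the conventional setter:

    public void setRemoteId(long remoteId) {
        this.remoteId = remoteId;
    }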
<|file_name|>test_meta.py<|end_file_name|><|fim▁begin|>"""
Tests for the integration test suite itself.
"""

import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import Set

import yaml

from get_test_group import patterns_from_group

__maintainer__ = 'adam'
__contact__ = '[email protected]'

log = logging.getLogger(__file__)


def _tests_from_pattern(ci_pattern: str) -> Set[str]:
    """
    From a CI pattern, get all tests ``pytest`` would collect.
    """
    tests = set([])  # type: Set[str]
    args = [
        'pytest',
        '--disable-pytest-warnings',
        '--collect-only',
        ci_pattern,
        '-q',
    ]
    # Test names will not be in ``stderr`` so we ignore that.
    result = subprocess.run(
        args=args,
        stdout=subprocess.PIPE,
        env={**os.environ, **{'PYTHONIOENCODING': 'UTF-8'}},
    )
    output = result.stdout
    for line in output.splitlines():
        if b'error in' in line:
            message = (
                'Error collecting tests for pattern "{ci_pattern}". '
                'Full output:\n'
                '{output}'
            ).format(
                ci_pattern=ci_pattern,
                output=output,
            )
            raise Exception(message)
        # Whitespace is important to avoid confusing pytest warning messages
        # with test names. For example, the pytest output may contain '3 tests
        # deselected' which would conflict with a test file called
        # test_agent_deselected.py if we ignored whitespace.
<|fim▁hole|>
            b' warnings' not in line and
            # Some tests are skipped on collection.
            b'skipped in' not in line and
            # Some tests are deselected by the ``pytest.ini`` configuration.
            b' deselected' not in line and
            not line.startswith(b'no tests ran in')
        ):
            tests.add(line.decode())
    return tests


def test_test_groups() -> None:
    """
    The test suite is split into various "groups".
    This test confirms that the groups together contain all tests, and each
    test is collected only once.
    """
    test_group_file = Path('test_groups.yaml')
    test_group_file_contents = test_group_file.read_text()
    test_groups = yaml.load(test_group_file_contents)['groups']
    test_patterns = []
    for group in test_groups:
        test_patterns += patterns_from_group(group_name=group)

    # Turn this into a list otherwise we can't canonically state whether every
    # test was collected _exactly_ once :-)
    tests_to_patterns = defaultdict(list)  # type: Mapping[str, List]
    for pattern in test_patterns:
        tests = _tests_from_pattern(ci_pattern=pattern)
        for test in tests:
            tests_to_patterns[test].append(pattern)

    errs = []
    for test_name, patterns in tests_to_patterns.items():
        message = (
            'Test "{test_name}" will be run once for each pattern in '
            '{patterns}. '
            'Each test should be run only once.'
        ).format(
            test_name=test_name,
            patterns=patterns,
        )
        if len(patterns) != 1:
            assert len(patterns) != 1, message
            errs.append(message)

    if errs:
        for message in errs:
            log.error(message)
        raise Exception("Some tests are not collected exactly once, see errors.")

    all_tests = _tests_from_pattern(ci_pattern='')
    assert tests_to_patterns.keys() - all_tests == set()
    assert all_tests - tests_to_patterns.keys() == set()<|fim▁end|>
if (
            line and
            # Some tests show warnings on collection.
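Editor's note: a hedged reconstruction, not part of the original record (indentation inferred). Splicing the completion above into the <|fim▁hole|> of test_meta.py restores the filter that keeps only real test IDs from the `pytest --collect-only -q` output:

        if (
            line and
            # Some tests show warnings on collection.
            b' warnings' not in line and
            # Some tests are skipped on collection.
            b'skipped in' not in line and
            # Some tests are deselected by the ``pytest.ini`` configuration.
            b' deselected' not in line and
            not line.startswith(b'no tests ran in')
        ):
            tests.add(line.decode())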
<|file_name|>upload.js<|end_file_name|><|fim▁begin|>//Settings
actype = ['image/png','image/jpeg','image/jpg','image/gif']; /* Accepted mime type */
maxweight = 819200; /* Max file size in octets */
maxwidth = 150; /* Max width of the image */
maxheight = 150; /* Max height */

//Caching variable selector
ish = $('.ish'); /* On attach element hide or show */
msgUp = $('.msgUp'); /* container message, infos, error... */
filezone = $('.filezone'); /* Selector filezone label */
fileprev = $('.filezone').children('img'); /* Selector img element */
filesend = $('#filesend'); /* Selector input file (children label) */
fileup = $('#fileup'); /* Selector button submit */
reset = $('#reset'); /* Selector button reset */

ish.hide(); /* Initial hide */

$(':file').change(function(e) {
    //Cancel the default execution
    e.preventDefault();

    //Full file
    file = this.files[0];

    var filer = new FileReader;

    filer.onload = function() {
        //Get size and type
        var aType = file.type;
        var aSize = file.size;

        //Check the file size
        if(aSize > maxweight) {
            msgUp.text('Too large, maximum '+ maxweight +' bytes');
            return;
        }

        //Check the file type
        if($.inArray(aType, actype) === -1) {
            msgUp.text('File type not allowed');
            return;
        }

        //Set src / preview
        fileprev.attr('src', filer.result);

        //Make new Image for get the width / height
        var image = new Image();
        image.src = filer.result;<|fim▁hole|>
        image.onload = function() {
            //Set width / height
            aWidth = image.width;
            aHeight = image.height;

            //Check width
            if(aWidth > maxwidth) {
                msgUp.text('Maximum ' + maxwidth + ' width allowed');
                return;
            }

            //Check height
            if(aHeight > maxheight) {
                msgUp.text('Maximum ' + maxheight + ' height allowed');
                return;
            }

            //Success of every check, display infos about the image and show up the <img> tag
            msgUp.html('Size :'+ aSize +' bytes<br>Filetype : '+ aType +'<br>Width :'+ aWidth +' px<br>Height: '+ aHeight +' px');
            ish.show();
            filesend.addClass('lock').css('height','0%');

        //End image
        };

    //End filer
    };

    //File is up
    filer.readAsDataURL(file);
});

//input file prevent on lock
$(document).off('click', '#filesend');
$(document).on('click', '#filesend', function(e) {
    //Cancel the default execution if img ready to be send to php
    if($(this).hasClass('lock')) e.preventDefault();
});

//On reset
$(document).off('click', '#reset');
$(document).on('click', '#reset', function(e) {
    //Cancel the default execution
    e.preventDefault();

    //Remove the href link
    fileprev.attr('src', '');

    //Set default message
    msgUp.text('Drop your avatar !');

    //Remove the lock of the input file
    if(filesend.hasClass('lock')) filesend.css('height','100%').removeClass();

    //Set default reset value
    $(this).val('Clear');

    //Back to hide
    ish.hide();
});

//On fileup
$(document).off('click', '#fileup');
$(document).on('click', '#fileup', function(e) {
    //Cancel the default execution
    e.preventDefault();

    //Set variable which contain the entire form / field
    var filesfm = new FormData(document.querySelector("form"));

    $.ajax({
        url: 'upload.php', //Server side script (php...)
        type: 'POST',
        data: filesfm,
        processData: false, //Avoid jquery process
        contentType: false //Avoid set content type (done by var filesfm)
    }).done(function(msg) {
        //Hide the button upload
        fileup.hide();
        //Change the text reset button (make as reinitialize the form)
        reset.val('Upload again !');
        //On success upload
        if(msg === 'err') msgUp.text('Something went wrong, try again.'); //That should not happen !
        else msgUp.text('Success, your file is available '+ msg); //Add the url of your file except the filename generated
    });
});<|fim▁end|>
<|file_name|>late.rs<|end_file_name|><|fim▁begin|>//! Implementation of lint checking. //! //! The lint checking is mostly consolidated into one pass which runs //! after all other analyses. Throughout compilation, lint warnings //! can be added via the `add_lint` method on the Session structure. This //! requires a span and an ID of the node that the lint is being added to. The //! lint isn't actually emitted at that time because it is unknown what the //! actual lint level at that location is. //! //! To actually emit lint warnings/errors, a separate pass is used. //! A context keeps track of the current state of all lint levels. //! Upon entering a node of the ast which can modify the lint settings, the //! previous lint state is pushed onto a stack and the ast is then recursed //! upon. As the ast is traversed, this keeps track of the current lint level //! for all lint attributes. use crate::{passes::LateLintPassObject, LateContext, LateLintPass, LintStore}; use rustc_ast as ast; use rustc_data_structures::sync::{join, par_iter, ParallelIterator}; use rustc_hir as hir; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit as hir_visit; use rustc_hir::intravisit::Visitor; use rustc_middle::hir::map::Map; use rustc_middle::ty::{self, TyCtxt}; use rustc_session::lint::LintPass; use rustc_span::symbol::Symbol; use rustc_span::Span; use std::any::Any; use std::cell::Cell; use std::slice; use tracing::debug; /// Extract the `LintStore` from the query context. /// This function exists because we've erased `LintStore` as `dyn Any` in the context. crate fn unerased_lint_store(tcx: TyCtxt<'_>) -> &LintStore { let store: &dyn Any = &*tcx.lint_store; store.downcast_ref().unwrap() } macro_rules! lint_callback { ($cx:expr, $f:ident, $($args:expr),*) => ({ $cx.pass.$f(&$cx.context, $($args),*); }) } struct LateContextAndPass<'tcx, T: LateLintPass<'tcx>> { context: LateContext<'tcx>, pass: T, } impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> { /// Merge the lints specified by any lint attributes into the /// current lint context, call the provided function, then reset the /// lints in effect to their previous state. fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F) where F: FnOnce(&mut Self), { let attrs = self.context.tcx.hir().attrs(id); let prev = self.context.last_node_with_lint_attrs; self.context.last_node_with_lint_attrs = id; self.enter_attrs(attrs); f(self); self.exit_attrs(attrs); self.context.last_node_with_lint_attrs = prev; } fn with_param_env<F>(&mut self, id: hir::HirId, f: F) where F: FnOnce(&mut Self), { let old_param_env = self.context.param_env; self.context.param_env = self.context.tcx.param_env(self.context.tcx.hir().local_def_id(id)); f(self); self.context.param_env = old_param_env; } fn process_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) { lint_callback!(self, check_mod, m, s, n); hir_visit::walk_mod(self, m, n); lint_callback!(self, check_mod_post, m, s, n); } fn enter_attrs(&mut self, attrs: &'tcx [ast::Attribute]) { debug!("late context: enter_attrs({:?})", attrs); lint_callback!(self, enter_lint_attrs, attrs); } fn exit_attrs(&mut self, attrs: &'tcx [ast::Attribute]) { debug!("late context: exit_attrs({:?})", attrs); lint_callback!(self, exit_lint_attrs, attrs); } } impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPass<'tcx, T> { type Map = Map<'tcx>; /// Because lints are scoped lexically, we want to walk nested /// items in the context of the outer item, so enable /// deep-walking. 
fn nested_visit_map(&mut self) -> hir_visit::NestedVisitorMap<Self::Map> { hir_visit::NestedVisitorMap::All(self.context.tcx.hir()) } fn visit_nested_body(&mut self, body_id: hir::BodyId) { let old_enclosing_body = self.context.enclosing_body.replace(body_id); let old_cached_typeck_results = self.context.cached_typeck_results.get(); // HACK(eddyb) avoid trashing `cached_typeck_results` when we're // nested in `visit_fn`, which may have already resulted in them // being queried. if old_enclosing_body != Some(body_id) { self.context.cached_typeck_results.set(None); } let body = self.context.tcx.hir().body(body_id); self.visit_body(body); self.context.enclosing_body = old_enclosing_body; // See HACK comment above. if old_enclosing_body != Some(body_id) { self.context.cached_typeck_results.set(old_cached_typeck_results); } } fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) { self.with_lint_attrs(param.hir_id, |cx| { lint_callback!(cx, check_param, param); hir_visit::walk_param(cx, param); }); } fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) { lint_callback!(self, check_body, body); hir_visit::walk_body(self, body); lint_callback!(self, check_body_post, body); } fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) { let generics = self.context.generics.take(); self.context.generics = it.kind.generics(); let old_cached_typeck_results = self.context.cached_typeck_results.take(); let old_enclosing_body = self.context.enclosing_body.take(); self.with_lint_attrs(it.hir_id(), |cx| { cx.with_param_env(it.hir_id(), |cx| { lint_callback!(cx, check_item, it); hir_visit::walk_item(cx, it); lint_callback!(cx, check_item_post, it); }); }); self.context.enclosing_body = old_enclosing_body; self.context.cached_typeck_results.set(old_cached_typeck_results); self.context.generics = generics; } fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) { self.with_lint_attrs(it.hir_id(), |cx| { cx.with_param_env(it.hir_id(), |cx| { lint_callback!(cx, check_foreign_item, it); hir_visit::walk_foreign_item(cx, it); lint_callback!(cx, check_foreign_item_post, it); }); }) } fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) { lint_callback!(self, check_pat, p); hir_visit::walk_pat(self, p); } fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) { self.with_lint_attrs(e.hir_id, |cx| { lint_callback!(cx, check_expr, e); hir_visit::walk_expr(cx, e); lint_callback!(cx, check_expr_post, e); }) } fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) { // See `EarlyContextAndPass::visit_stmt` for an explanation // of why we call `walk_stmt` outside of `with_lint_attrs` self.with_lint_attrs(s.hir_id, |cx| { lint_callback!(cx, check_stmt, s); }); hir_visit::walk_stmt(self, s); } fn visit_fn( &mut self, fk: hir_visit::FnKind<'tcx>, decl: &'tcx hir::FnDecl<'tcx>, body_id: hir::BodyId, span: Span, id: hir::HirId, ) { // Wrap in typeck results here, not just in visit_nested_body, // in order for `check_fn` to be able to use them. 
let old_enclosing_body = self.context.enclosing_body.replace(body_id); let old_cached_typeck_results = self.context.cached_typeck_results.take(); let body = self.context.tcx.hir().body(body_id); lint_callback!(self, check_fn, fk, decl, body, span, id); hir_visit::walk_fn(self, fk, decl, body_id, span, id); lint_callback!(self, check_fn_post, fk, decl, body, span, id); self.context.enclosing_body = old_enclosing_body; self.context.cached_typeck_results.set(old_cached_typeck_results); } fn visit_variant_data( &mut self, s: &'tcx hir::VariantData<'tcx>, _: Symbol, _: &'tcx hir::Generics<'tcx>, _: hir::HirId, _: Span, ) { lint_callback!(self, check_struct_def, s); hir_visit::walk_struct_def(self, s); lint_callback!(self, check_struct_def_post, s); } fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) { self.with_lint_attrs(s.hir_id, |cx| { lint_callback!(cx, check_field_def, s); hir_visit::walk_field_def(cx, s); }) } fn visit_variant( &mut self, v: &'tcx hir::Variant<'tcx>, g: &'tcx hir::Generics<'tcx>, item_id: hir::HirId, ) { self.with_lint_attrs(v.id, |cx| { lint_callback!(cx, check_variant, v); hir_visit::walk_variant(cx, v, g, item_id); lint_callback!(cx, check_variant_post, v); }) } fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) { lint_callback!(self, check_ty, t); hir_visit::walk_ty(self, t); } fn visit_infer(&mut self, inf: &'tcx hir::InferArg) { lint_callback!(self, check_infer, inf); hir_visit::walk_inf(self, inf); } fn visit_name(&mut self, sp: Span, name: Symbol) { lint_callback!(self, check_name, sp, name); } fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) { if !self.context.only_module { self.process_mod(m, s, n); } } fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) { self.with_lint_attrs(l.hir_id, |cx| { lint_callback!(cx, check_local, l); hir_visit::walk_local(cx, l); }) } fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) { lint_callback!(self, check_block, b); hir_visit::walk_block(self, b); lint_callback!(self, check_block_post, b); } fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) { lint_callback!(self, check_arm, a); hir_visit::walk_arm(self, a); } fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) { lint_callback!(self, check_generic_param, p); hir_visit::walk_generic_param(self, p); } fn visit_generics(&mut self, g: &'tcx hir::Generics<'tcx>) { lint_callback!(self, check_generics, g); hir_visit::walk_generics(self, g); } fn visit_where_predicate(&mut self, p: &'tcx hir::WherePredicate<'tcx>) { lint_callback!(self, check_where_predicate, p); hir_visit::walk_where_predicate(self, p); } fn visit_poly_trait_ref( &mut self, t: &'tcx hir::PolyTraitRef<'tcx>, m: hir::TraitBoundModifier, ) { lint_callback!(self, check_poly_trait_ref, t, m); hir_visit::walk_poly_trait_ref(self, t, m); } fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) { let generics = self.context.generics.take(); self.context.generics = Some(&trait_item.generics); self.with_lint_attrs(trait_item.hir_id(), |cx| { cx.with_param_env(trait_item.hir_id(), |cx| { lint_callback!(cx, check_trait_item, trait_item); hir_visit::walk_trait_item(cx, trait_item); lint_callback!(cx, check_trait_item_post, trait_item); });<|fim▁hole|> }); self.context.generics = generics; } fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) { let generics = self.context.generics.take(); self.context.generics = Some(&impl_item.generics); self.with_lint_attrs(impl_item.hir_id(), |cx| { cx.with_param_env(impl_item.hir_id(), |cx| { lint_callback!(cx, 
check_impl_item, impl_item); hir_visit::walk_impl_item(cx, impl_item); lint_callback!(cx, check_impl_item_post, impl_item); }); }); self.context.generics = generics; } fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) { lint_callback!(self, check_lifetime, lt); hir_visit::walk_lifetime(self, lt); } fn visit_path(&mut self, p: &'tcx hir::Path<'tcx>, id: hir::HirId) { lint_callback!(self, check_path, p, id); hir_visit::walk_path(self, p); } fn visit_attribute(&mut self, hir_id: hir::HirId, attr: &'tcx ast::Attribute) { self.with_lint_attrs(hir_id, |cx| { lint_callback!(cx, check_attribute, attr); }) } } struct LateLintPassObjects<'a> { lints: &'a mut [LateLintPassObject], } #[allow(rustc::lint_pass_impl_without_macro)] impl LintPass for LateLintPassObjects<'_> { fn name(&self) -> &'static str { panic!() } } macro_rules! expand_late_lint_pass_impl_methods { ([$hir:tt], [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => ( $(fn $name(&mut self, context: &LateContext<$hir>, $($param: $arg),*) { for obj in self.lints.iter_mut() { obj.$name(context, $($param),*); } })* ) } macro_rules! late_lint_pass_impl { ([], [$hir:tt], $methods:tt) => { impl<$hir> LateLintPass<$hir> for LateLintPassObjects<'_> { expand_late_lint_pass_impl_methods!([$hir], $methods); } }; } crate::late_lint_methods!(late_lint_pass_impl, [], ['tcx]); fn late_lint_mod_pass<'tcx, T: LateLintPass<'tcx>>( tcx: TyCtxt<'tcx>, module_def_id: LocalDefId, pass: T, ) { let access_levels = &tcx.privacy_access_levels(()); let context = LateContext { tcx, enclosing_body: None, cached_typeck_results: Cell::new(None), param_env: ty::ParamEnv::empty(), access_levels, lint_store: unerased_lint_store(tcx), last_node_with_lint_attrs: tcx.hir().local_def_id_to_hir_id(module_def_id), generics: None, only_module: true, }; let mut cx = LateContextAndPass { context, pass }; let (module, span, hir_id) = tcx.hir().get_module(module_def_id); cx.process_mod(module, span, hir_id); // Visit the crate attributes if hir_id == hir::CRATE_HIR_ID { for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() { cx.visit_attribute(hir_id, attr) } } } pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx>>( tcx: TyCtxt<'tcx>, module_def_id: LocalDefId, builtin_lints: T, ) { if tcx.sess.opts.debugging_opts.no_interleave_lints { // These passes runs in late_lint_crate with -Z no_interleave_lints return; } late_lint_mod_pass(tcx, module_def_id, builtin_lints); let mut passes: Vec<_> = unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect(); if !passes.is_empty() { late_lint_mod_pass(tcx, module_def_id, LateLintPassObjects { lints: &mut passes[..] }); } } fn late_lint_pass_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, pass: T) { let access_levels = &tcx.privacy_access_levels(()); let krate = tcx.hir().krate(); let context = LateContext { tcx, enclosing_body: None, cached_typeck_results: Cell::new(None), param_env: ty::ParamEnv::empty(), access_levels, lint_store: unerased_lint_store(tcx), last_node_with_lint_attrs: hir::CRATE_HIR_ID, generics: None, only_module: false, }; let mut cx = LateContextAndPass { context, pass }; // Visit the whole crate. cx.with_lint_attrs(hir::CRATE_HIR_ID, |cx| { // since the root module isn't visited as an item (because it isn't an // item), warn for it here. 
lint_callback!(cx, check_crate, krate); tcx.hir().walk_toplevel_module(cx); tcx.hir().walk_attributes(cx); lint_callback!(cx, check_crate_post, krate); }) } fn late_lint_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, builtin_lints: T) { let mut passes = unerased_lint_store(tcx).late_passes.iter().map(|p| (p)()).collect::<Vec<_>>(); if !tcx.sess.opts.debugging_opts.no_interleave_lints { if !passes.is_empty() { late_lint_pass_crate(tcx, LateLintPassObjects { lints: &mut passes[..] }); } late_lint_pass_crate(tcx, builtin_lints); } else { for pass in &mut passes { tcx.sess.prof.extra_verbose_generic_activity("run_late_lint", pass.name()).run(|| { late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) }); }); } let mut passes: Vec<_> = unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect(); for pass in &mut passes { tcx.sess.prof.extra_verbose_generic_activity("run_late_module_lint", pass.name()).run( || { late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) }); }, ); } } } /// Performs lint checking on a crate. pub fn check_crate<'tcx, T: LateLintPass<'tcx>>( tcx: TyCtxt<'tcx>, builtin_lints: impl FnOnce() -> T + Send, ) { join( || { tcx.sess.time("crate_lints", || { // Run whole crate non-incremental lints late_lint_crate(tcx, builtin_lints()); }); }, || { tcx.sess.time("module_lints", || { // Run per-module lints par_iter(&tcx.hir().krate().modules).for_each(|(&module, _)| { tcx.ensure().lint_mod(module); }); }); }, ); }<|fim▁end|>
<|file_name|>Tags.js<|end_file_name|><|fim▁begin|>/**
 * Interaction for the tags module<|fim▁hole|>
 */
jsBackend.tags =
{
    // init, something like a constructor
    init: function()
    {
        $dataGridTag = $('.jsDataGrid td.tag');
        if($dataGridTag.length > 0) $dataGridTag.inlineTextEdit(
        {
            params: { fork: { action: 'edit' } },
            tooltip: jsBackend.locale.msg('ClickToEdit')
        });
    }
};

$(jsBackend.tags.init);<|fim▁end|>
 *
 * @author Tijs Verkoyen <[email protected]>
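Editor's note: a hedged reconstruction, not part of the original record. With the completion above spliced into the <|fim▁hole|> of Tags.js, the header comment reads:

    /**
     * Interaction for the tags module
     *
     * @author Tijs Verkoyen <[email protected]>
     */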
<|file_name|>sales_schema.py<|end_file_name|><|fim▁begin|>import datetime<|fim▁hole|>
class Store:
    def parse(self, line):
        fields = line.split('\t')
        self.id = fields[0]
        self.name = fields[1]
        return self

    def __repr__(self):
        return "Store: id=%s \t name=%s" % (self.id, self.name)


class Product:
    def parse(self, line):
        fields = line.split('\t')
        self.id = fields[0]
        self.name = fields[1]
        self.category = fields[2]
        return self

    def __repr__(self):
        return "Product: id=%s \t name=%s" % (self.id, self.name)


class SaleRow:
    def parse(self, line):
        fields = line.split('\t')
        self.day = fields[0]  # maybe parse as date? see below :)
        # self.day = datetime.datetime.strptime(fields[0], "%Y-%m-%d")
        self.store_id = fields[1]
        self.product_id = fields[2]
        self.quantity = int(fields[3])  # let's parse this
        return self

    def __repr__(self):
        return "SaleRow: day=%s \t store_id=%s \t product_id=%s quantity=%d" % (
            self.day, self.store_id, self.product_id, self.quantity)<|fim▁end|>
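Editor's note: a hedged usage sketch, not part of the original record; the tab-separated sample line below is invented for illustration. Each parse() splits one TSV record and returns self, so construction and parsing chain in a single expression:

    # hypothetical input: day, store id, product id, quantity
    row = SaleRow().parse("2014-01-01\tstore42\tprod7\t3")
    print(row)  # SaleRow: day=2014-01-01 \t store_id=store42 \t product_id=prod7 quantity=3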
<|file_name|>ScriptMgr.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "Config.h" #include "DatabaseEnv.h" #include "DBCStores.h" #include "ObjectMgr.h" #include "OutdoorPvPMgr.h" #include "ScriptLoader.h" #include "ScriptSystem.h" #include "Transport.h" #include "Vehicle.h" #include "SpellInfo.h" #include "SpellScript.h" #include "GossipDef.h" #include "CreatureAI.h" #include "sc_npc_teleport.h" // This is the global static registry of scripts. template<class TScript> class ScriptRegistry { public: typedef std::map<uint32, TScript*> ScriptMap; typedef typename ScriptMap::iterator ScriptMapIterator; // The actual list of scripts. This will be accessed concurrently, so it must not be modified // after server startup. static ScriptMap ScriptPointerList; static void AddScript(TScript* const script) { ASSERT(script); // See if the script is using the same memory as another script. If this happens, it means that // someone forgot to allocate new memory for a script. for (ScriptMapIterator it = ScriptPointerList.begin(); it != ScriptPointerList.end(); ++it) { if (it->second == script) { sLog->outError(LOG_FILTER_TSCR, "Script '%s' has same memory pointer as '%s'.", script->GetName().c_str(), it->second->GetName().c_str()); return; } } if (script->IsDatabaseBound()) { // Get an ID for the script. An ID only exists if it's a script that is assigned in the database // through a script name (or similar). uint32 id = sObjectMgr->GetScriptId(script->GetName().c_str()); if (id) { // Try to find an existing script. bool existing = false; for (ScriptMapIterator it = ScriptPointerList.begin(); it != ScriptPointerList.end(); ++it) { // If the script names match... if (it->second->GetName() == script->GetName()) { // ... It exists. existing = true; break; } } // If the script isn't assigned -> assign it! if (!existing) { ScriptPointerList[id] = script; sScriptMgr->IncrementScriptCount(); } else { // If the script is already assigned -> delete it! sLog->outError(LOG_FILTER_TSCR, "Script '%s' already assigned with the same script name, so the script can't work.", script->GetName().c_str()); ASSERT(false); // Error that should be fixed ASAP. } } else { // The script uses a script name from database, but isn't assigned to anything. if (script->GetName().find("example") == std::string::npos && script->GetName().find("Smart") == std::string::npos) sLog->outError(LOG_FILTER_SQL, "Script named '%s' does not have a script name assigned in database.", script->GetName().c_str()); } } else { // We're dealing with a code-only script; just add it. ScriptPointerList[_scriptIdCounter++] = script; sScriptMgr->IncrementScriptCount(); } } // Gets a script by its ID (assigned by ObjectMgr). 
static TScript* GetScriptById(uint32 id) { ScriptMapIterator it = ScriptPointerList.find(id); if (it != ScriptPointerList.end()) return it->second; return NULL; } private: // Counter used for code-only scripts. static uint32 _scriptIdCounter; }; // Utility macros to refer to the script registry. #define SCR_REG_MAP(T) ScriptRegistry<T>::ScriptMap #define SCR_REG_ITR(T) ScriptRegistry<T>::ScriptMapIterator #define SCR_REG_LST(T) ScriptRegistry<T>::ScriptPointerList // Utility macros for looping over scripts. #define FOR_SCRIPTS(T, C, E) \ if (SCR_REG_LST(T).empty()) \ return; \ for (SCR_REG_ITR(T) C = SCR_REG_LST(T).begin(); \ C != SCR_REG_LST(T).end(); ++C) #define FOR_SCRIPTS_RET(T, C, E, R) \ if (SCR_REG_LST(T).empty()) \ return R; \ for (SCR_REG_ITR(T) C = SCR_REG_LST(T).begin(); \ C != SCR_REG_LST(T).end(); ++C) #define FOREACH_SCRIPT(T) \ FOR_SCRIPTS(T, itr, end) \ itr->second // Utility macros for finding specific scripts. #define GET_SCRIPT(T, I, V) \ T* V = ScriptRegistry<T>::GetScriptById(I); \ if (!V) \ return; #define GET_SCRIPT_RET(T, I, V, R) \ T* V = ScriptRegistry<T>::GetScriptById(I); \ if (!V) \ return R; void DoScriptText(int32 iTextEntry, WorldObject* pSource, Unit* target) { if (!pSource) { sLog->outError(LOG_FILTER_TSCR, "DoScriptText entry %i, invalid Source pointer.", iTextEntry); return; } if (iTextEntry >= 0) { sLog->outError(LOG_FILTER_TSCR, "DoScriptText with source entry %u (TypeId=%u, guid=%u) attempts to process text entry %i, but text entry must be negative.", pSource->GetEntry(), pSource->GetTypeId(), pSource->GetGUIDLow(), iTextEntry); return; } const StringTextData* pData = sScriptSystemMgr->GetTextData(iTextEntry); if (!pData) { sLog->outError(LOG_FILTER_TSCR, "DoScriptText with source entry %u (TypeId=%u, guid=%u) could not find text entry %i.", pSource->GetEntry(), pSource->GetTypeId(), pSource->GetGUIDLow(), iTextEntry); return; } sLog->outDebug(LOG_FILTER_TSCR, "DoScriptText: text entry=%i, Sound=%u, Type=%u, Language=%u, Emote=%u", iTextEntry, pData->uiSoundId, pData->uiType, pData->uiLanguage, pData->uiEmote); if (pData->uiSoundId) { if (sSoundEntriesStore.LookupEntry(pData->uiSoundId)) pSource->SendPlaySound(pData->uiSoundId, false); else sLog->outError(LOG_FILTER_TSCR, "DoScriptText entry %i tried to process invalid sound id %u.", iTextEntry, pData->uiSoundId); } if (pData->uiEmote) { if (pSource->GetTypeId() == TYPEID_UNIT || pSource->GetTypeId() == TYPEID_PLAYER) ((Unit*)pSource)->HandleEmoteCommand(pData->uiEmote); else sLog->outError(LOG_FILTER_TSCR, "DoScriptText entry %i tried to process emote for invalid TypeId (%u).", iTextEntry, pSource->GetTypeId()); } switch (pData->uiType) { case CHAT_TYPE_SAY: pSource->MonsterSay(iTextEntry, pData->uiLanguage, target ? target->GetGUID() : 0); break; case CHAT_TYPE_YELL: pSource->MonsterYell(iTextEntry, pData->uiLanguage, target ? target->GetGUID() : 0); break; case CHAT_TYPE_TEXT_EMOTE: pSource->MonsterTextEmote(iTextEntry, target ? target->GetGUID() : 0); break; case CHAT_TYPE_BOSS_EMOTE: pSource->MonsterTextEmote(iTextEntry, target ? 
target->GetGUID() : 0, true); break; case CHAT_TYPE_WHISPER: { if (target && target->GetTypeId() == TYPEID_PLAYER) pSource->MonsterWhisper(iTextEntry, target->GetGUID()); else sLog->outError(LOG_FILTER_TSCR, "DoScriptText entry %i cannot whisper without target unit (TYPEID_PLAYER).", iTextEntry); break; } case CHAT_TYPE_BOSS_WHISPER: { if (target && target->GetTypeId() == TYPEID_PLAYER) pSource->MonsterWhisper(iTextEntry, target->GetGUID(), true); else sLog->outError(LOG_FILTER_TSCR, "DoScriptText entry %i cannot whisper without target unit (TYPEID_PLAYER).", iTextEntry); break; } case CHAT_TYPE_ZONE_YELL: pSource->MonsterYellToZone(iTextEntry, pData->uiLanguage, target ? target->GetGUID() : 0); break; } } ScriptMgr::ScriptMgr() : _scriptCount(0), _scheduledScripts(0) { } ScriptMgr::~ScriptMgr() { } void ScriptMgr::Initialize() { uint32 oldMSTime = getMSTime(); LoadDatabase(); // Load TeleNPC2 - maybe not the best place to load it ... LoadNpcTele(); sLog->outInfo(LOG_FILTER_SERVER_LOADING, "Loading C++ scripts"); FillSpellSummary(); AddScripts(); sLog->outInfo(LOG_FILTER_SERVER_LOADING, ">> Loaded %u C++ scripts in %u ms", GetScriptCount(), GetMSTimeDiffToNow(oldMSTime)); } void ScriptMgr::Unload() { #define SCR_CLEAR(T) \ for (SCR_REG_ITR(T) itr = SCR_REG_LST(T).begin(); itr != SCR_REG_LST(T).end(); ++itr) \ delete itr->second; \ SCR_REG_LST(T).clear(); // Clear scripts for every script type. SCR_CLEAR(SpellScriptLoader); SCR_CLEAR(ServerScript); SCR_CLEAR(WorldScript); SCR_CLEAR(FormulaScript); SCR_CLEAR(WorldMapScript); SCR_CLEAR(InstanceMapScript); SCR_CLEAR(BattlegroundMapScript); SCR_CLEAR(ItemScript); SCR_CLEAR(CreatureScript); SCR_CLEAR(GameObjectScript); SCR_CLEAR(AreaTriggerScript); SCR_CLEAR(BattlegroundScript); SCR_CLEAR(OutdoorPvPScript); SCR_CLEAR(CommandScript); SCR_CLEAR(WeatherScript); SCR_CLEAR(AuctionHouseScript); SCR_CLEAR(ConditionScript); SCR_CLEAR(VehicleScript); SCR_CLEAR(DynamicObjectScript); SCR_CLEAR(TransportScript); SCR_CLEAR(AchievementCriteriaScript); SCR_CLEAR(PlayerScript); SCR_CLEAR(GuildScript); SCR_CLEAR(GroupScript); #undef SCR_CLEAR } void ScriptMgr::LoadDatabase() { sScriptSystemMgr->LoadScriptTexts(); sScriptSystemMgr->LoadScriptTextsCustom(); sScriptSystemMgr->LoadScriptWaypoints(); } struct TSpellSummary { uint8 Targets; // set of enum SelectTarget uint8 Effects; // set of enum SelectEffect } *SpellSummary; void ScriptMgr::FillSpellSummary() { SpellSummary = new TSpellSummary[sSpellMgr->GetSpellInfoStoreSize()]; SpellInfo const* pTempSpell; for (uint32 i = 0; i < sSpellMgr->GetSpellInfoStoreSize(); ++i) {<|fim▁hole|> SpellSummary[i].Effects = 0; SpellSummary[i].Targets = 0; pTempSpell = sSpellMgr->GetSpellInfo(i); // This spell doesn't exist. if (!pTempSpell) continue; for (uint32 j = 0; j < MAX_SPELL_EFFECTS; ++j) { // Spell targets self. if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_CASTER) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_SELF-1); // Spell targets a single enemy. if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_DEST_TARGET_ENEMY) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_SINGLE_ENEMY-1); // Spell targets AoE at enemy. 
if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_SRC_AREA_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_DEST_AREA_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_SRC_CASTER || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_DEST_DYNOBJ_ENEMY) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_AOE_ENEMY-1); // Spell targets an enemy. if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_DEST_TARGET_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_SRC_AREA_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_DEST_AREA_ENEMY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_SRC_CASTER || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_DEST_DYNOBJ_ENEMY) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_ANY_ENEMY-1); // Spell targets a single friend (or self). if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_CASTER || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_ALLY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_PARTY) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_SINGLE_FRIEND-1); // Spell targets AoE friends. if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_CASTER_AREA_PARTY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_LASTTARGET_AREA_PARTY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_SRC_CASTER) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_AOE_FRIEND-1); // Spell targets any friend (or self). if (pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_CASTER || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_ALLY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_TARGET_PARTY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_CASTER_AREA_PARTY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_UNIT_LASTTARGET_AREA_PARTY || pTempSpell->Effects[j].TargetA.GetTarget() == TARGET_SRC_CASTER) SpellSummary[i].Targets |= 1 << (SELECT_TARGET_ANY_FRIEND-1); // Make sure that this spell includes a damage effect. if (pTempSpell->Effects[j].Effect == SPELL_EFFECT_SCHOOL_DAMAGE || pTempSpell->Effects[j].Effect == SPELL_EFFECT_INSTAKILL || pTempSpell->Effects[j].Effect == SPELL_EFFECT_ENVIRONMENTAL_DAMAGE || pTempSpell->Effects[j].Effect == SPELL_EFFECT_HEALTH_LEECH) SpellSummary[i].Effects |= 1 << (SELECT_EFFECT_DAMAGE-1); // Make sure that this spell includes a healing effect (or an apply aura with a periodic heal). if (pTempSpell->Effects[j].Effect == SPELL_EFFECT_HEAL || pTempSpell->Effects[j].Effect == SPELL_EFFECT_HEAL_MAX_HEALTH || pTempSpell->Effects[j].Effect == SPELL_EFFECT_HEAL_MECHANICAL || (pTempSpell->Effects[j].Effect == SPELL_EFFECT_APPLY_AURA && pTempSpell->Effects[j].ApplyAuraName == 8)) SpellSummary[i].Effects |= 1 << (SELECT_EFFECT_HEALING-1); // Make sure that this spell applies an aura. 
if (pTempSpell->Effects[j].Effect == SPELL_EFFECT_APPLY_AURA) SpellSummary[i].Effects |= 1 << (SELECT_EFFECT_AURA-1); } } } void ScriptMgr::CreateSpellScripts(uint32 spellId, std::list<SpellScript*>& scriptVector) { SpellScriptsBounds bounds = sObjectMgr->GetSpellScriptsBounds(spellId); for (SpellScriptsContainer::iterator itr = bounds.first; itr != bounds.second; ++itr) { SpellScriptLoader* tmpscript = ScriptRegistry<SpellScriptLoader>::GetScriptById(itr->second); if (!tmpscript) continue; SpellScript* script = tmpscript->GetSpellScript(); if (!script) continue; script->_Init(&tmpscript->GetName(), spellId); scriptVector.push_back(script); } } void ScriptMgr::CreateAuraScripts(uint32 spellId, std::list<AuraScript*>& scriptVector) { SpellScriptsBounds bounds = sObjectMgr->GetSpellScriptsBounds(spellId); for (SpellScriptsContainer::iterator itr = bounds.first; itr != bounds.second; ++itr) { SpellScriptLoader* tmpscript = ScriptRegistry<SpellScriptLoader>::GetScriptById(itr->second); if (!tmpscript) continue; AuraScript* script = tmpscript->GetAuraScript(); if (!script) continue; script->_Init(&tmpscript->GetName(), spellId); scriptVector.push_back(script); } } void ScriptMgr::CreateSpellScriptLoaders(uint32 spellId, std::vector<std::pair<SpellScriptLoader*, SpellScriptsContainer::iterator> >& scriptVector) { SpellScriptsBounds bounds = sObjectMgr->GetSpellScriptsBounds(spellId); scriptVector.reserve(std::distance(bounds.first, bounds.second)); for (SpellScriptsContainer::iterator itr = bounds.first; itr != bounds.second; ++itr) { SpellScriptLoader* tmpscript = ScriptRegistry<SpellScriptLoader>::GetScriptById(itr->second); if (!tmpscript) continue; scriptVector.push_back(std::make_pair(tmpscript, itr)); } } void ScriptMgr::OnNetworkStart() { FOREACH_SCRIPT(ServerScript)->OnNetworkStart(); } void ScriptMgr::OnNetworkStop() { FOREACH_SCRIPT(ServerScript)->OnNetworkStop(); } void ScriptMgr::OnSocketOpen(WorldSocket* socket) { ASSERT(socket); FOREACH_SCRIPT(ServerScript)->OnSocketOpen(socket); } void ScriptMgr::OnSocketClose(WorldSocket* socket, bool wasNew) { ASSERT(socket); FOREACH_SCRIPT(ServerScript)->OnSocketClose(socket, wasNew); } void ScriptMgr::OnPacketReceive(WorldSocket* socket, WorldPacket packet) { ASSERT(socket); FOREACH_SCRIPT(ServerScript)->OnPacketReceive(socket, packet); } void ScriptMgr::OnPacketSend(WorldSocket* socket, WorldPacket packet) { ASSERT(socket); FOREACH_SCRIPT(ServerScript)->OnPacketSend(socket, packet); } void ScriptMgr::OnUnknownPacketReceive(WorldSocket* socket, WorldPacket packet) { ASSERT(socket); FOREACH_SCRIPT(ServerScript)->OnUnknownPacketReceive(socket, packet); } void ScriptMgr::OnOpenStateChange(bool open) { FOREACH_SCRIPT(WorldScript)->OnOpenStateChange(open); } void ScriptMgr::OnConfigLoad(bool reload) { FOREACH_SCRIPT(WorldScript)->OnConfigLoad(reload); } void ScriptMgr::OnMotdChange(std::string& newMotd) { FOREACH_SCRIPT(WorldScript)->OnMotdChange(newMotd); } void ScriptMgr::OnShutdownInitiate(ShutdownExitCode code, ShutdownMask mask) { FOREACH_SCRIPT(WorldScript)->OnShutdownInitiate(code, mask); } void ScriptMgr::OnShutdownCancel() { FOREACH_SCRIPT(WorldScript)->OnShutdownCancel(); } void ScriptMgr::OnWorldUpdate(uint32 diff) { FOREACH_SCRIPT(WorldScript)->OnUpdate(diff); } void ScriptMgr::OnHonorCalculation(float& honor, uint8 level, float multiplier) { FOREACH_SCRIPT(FormulaScript)->OnHonorCalculation(honor, level, multiplier); } void ScriptMgr::OnGrayLevelCalculation(uint8& grayLevel, uint8 playerLevel) { 
FOREACH_SCRIPT(FormulaScript)->OnGrayLevelCalculation(grayLevel, playerLevel); } void ScriptMgr::OnColorCodeCalculation(XPColorChar& color, uint8 playerLevel, uint8 mobLevel) { FOREACH_SCRIPT(FormulaScript)->OnColorCodeCalculation(color, playerLevel, mobLevel); } void ScriptMgr::OnZeroDifferenceCalculation(uint8& diff, uint8 playerLevel) { FOREACH_SCRIPT(FormulaScript)->OnZeroDifferenceCalculation(diff, playerLevel); } void ScriptMgr::OnBaseGainCalculation(uint32& gain, uint8 playerLevel, uint8 mobLevel, ContentLevels content) { FOREACH_SCRIPT(FormulaScript)->OnBaseGainCalculation(gain, playerLevel, mobLevel, content); } void ScriptMgr::OnGainCalculation(uint32& gain, Player* player, Unit* unit) { ASSERT(player); ASSERT(unit); FOREACH_SCRIPT(FormulaScript)->OnGainCalculation(gain, player, unit); } void ScriptMgr::OnGroupRateCalculation(float& rate, uint32 count, bool isRaid) { FOREACH_SCRIPT(FormulaScript)->OnGroupRateCalculation(rate, count, isRaid); } #define SCR_MAP_BGN(M, V, I, E, C, T) \ if (V->GetEntry()->T()) \ { \ FOR_SCRIPTS(M, I, E) \ { \ MapEntry const* C = I->second->GetEntry(); \ if (!C) \ continue; \ if (entry->MapID == V->GetId()) \ { #define SCR_MAP_END \ return; \ } \ } \ } void ScriptMgr::OnCreateMap(Map* map) { ASSERT(map); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnCreate(map); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnCreate((InstanceMap*)map); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnCreate((BattlegroundMap*)map); SCR_MAP_END; } void ScriptMgr::OnDestroyMap(Map* map) { ASSERT(map); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnDestroy(map); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnDestroy((InstanceMap*)map); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnDestroy((BattlegroundMap*)map); SCR_MAP_END; } void ScriptMgr::OnLoadGridMap(Map* map, GridMap* gmap, uint32 gx, uint32 gy) { ASSERT(map); ASSERT(gmap); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnLoadGridMap(map, gmap, gx, gy); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnLoadGridMap((InstanceMap*)map, gmap, gx, gy); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnLoadGridMap((BattlegroundMap*)map, gmap, gx, gy); SCR_MAP_END; } void ScriptMgr::OnUnloadGridMap(Map* map, GridMap* gmap, uint32 gx, uint32 gy) { ASSERT(map); ASSERT(gmap); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnUnloadGridMap(map, gmap, gx, gy); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnUnloadGridMap((InstanceMap*)map, gmap, gx, gy); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnUnloadGridMap((BattlegroundMap*)map, gmap, gx, gy); SCR_MAP_END; } void ScriptMgr::OnPlayerEnterMap(Map* map, Player* player) { ASSERT(map); ASSERT(player); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnPlayerEnter(map, player); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnPlayerEnter((InstanceMap*)map, player); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnPlayerEnter((BattlegroundMap*)map, 
player); SCR_MAP_END; } void ScriptMgr::OnPlayerLeaveMap(Map* map, Player* player) { ASSERT(map); ASSERT(player); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnPlayerLeave(map, player); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnPlayerLeave((InstanceMap*)map, player); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnPlayerLeave((BattlegroundMap*)map, player); SCR_MAP_END; } void ScriptMgr::OnMapUpdate(Map* map, uint32 diff) { ASSERT(map); SCR_MAP_BGN(WorldMapScript, map, itr, end, entry, IsWorldMap); itr->second->OnUpdate(map, diff); SCR_MAP_END; SCR_MAP_BGN(InstanceMapScript, map, itr, end, entry, IsDungeon); itr->second->OnUpdate((InstanceMap*)map, diff); SCR_MAP_END; SCR_MAP_BGN(BattlegroundMapScript, map, itr, end, entry, IsBattleground); itr->second->OnUpdate((BattlegroundMap*)map, diff); SCR_MAP_END; } #undef SCR_MAP_BGN #undef SCR_MAP_END InstanceScript* ScriptMgr::CreateInstanceData(InstanceMap* map) { ASSERT(map); GET_SCRIPT_RET(InstanceMapScript, map->GetScriptId(), tmpscript, NULL); return tmpscript->GetInstanceScript(map); } bool ScriptMgr::OnDummyEffect(Unit* caster, uint32 spellId, SpellEffIndex effIndex, Item* target) { ASSERT(caster); ASSERT(target); GET_SCRIPT_RET(ItemScript, target->GetScriptId(), tmpscript, false); return tmpscript->OnDummyEffect(caster, spellId, effIndex, target); } bool ScriptMgr::OnQuestAccept(Player* player, Item* item, Quest const* quest) { ASSERT(player); ASSERT(item); ASSERT(quest); GET_SCRIPT_RET(ItemScript, item->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestAccept(player, item, quest); } bool ScriptMgr::OnItemUse(Player* player, Item* item, SpellCastTargets const& targets) { ASSERT(player); ASSERT(item); GET_SCRIPT_RET(ItemScript, item->GetScriptId(), tmpscript, false); return tmpscript->OnUse(player, item, targets); } bool ScriptMgr::OnItemExpire(Player* player, ItemTemplate const* proto) { ASSERT(player); ASSERT(proto); GET_SCRIPT_RET(ItemScript, proto->ScriptId, tmpscript, false); return tmpscript->OnExpire(player, proto); } bool ScriptMgr::OnDummyEffect(Unit* caster, uint32 spellId, SpellEffIndex effIndex, Creature* target) { ASSERT(caster); ASSERT(target); GET_SCRIPT_RET(CreatureScript, target->GetScriptId(), tmpscript, false); return tmpscript->OnDummyEffect(caster, spellId, effIndex, target); } bool ScriptMgr::OnGossipHello(Player* player, Creature* creature) { ASSERT(player); ASSERT(creature); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnGossipHello(player, creature); } bool ScriptMgr::OnGossipSelect(Player* player, Creature* creature, uint32 sender, uint32 action) { ASSERT(player); ASSERT(creature); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); return tmpscript->OnGossipSelect(player, creature, sender, action); } bool ScriptMgr::OnGossipSelectCode(Player* player, Creature* creature, uint32 sender, uint32 action, const char* code) { ASSERT(player); ASSERT(creature); ASSERT(code); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); return tmpscript->OnGossipSelectCode(player, creature, sender, action, code); } bool ScriptMgr::OnQuestAccept(Player* player, Creature* creature, Quest const* quest) { ASSERT(player); ASSERT(creature); ASSERT(quest); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, 
false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestAccept(player, creature, quest); } bool ScriptMgr::OnQuestSelect(Player* player, Creature* creature, Quest const* quest) { ASSERT(player); ASSERT(creature); ASSERT(quest); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestSelect(player, creature, quest); } bool ScriptMgr::OnQuestComplete(Player* player, Creature* creature, Quest const* quest) { ASSERT(player); ASSERT(creature); ASSERT(quest); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestComplete(player, creature, quest); } bool ScriptMgr::OnQuestReward(Player* player, Creature* creature, Quest const* quest, uint32 opt) { ASSERT(player); ASSERT(creature); ASSERT(quest); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestReward(player, creature, quest, opt); } uint32 ScriptMgr::GetDialogStatus(Player* player, Creature* creature) { ASSERT(player); ASSERT(creature); // TODO: 100 is a funny magic number to have hanging around here... GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, 100); player->PlayerTalkClass->ClearMenus(); return tmpscript->GetDialogStatus(player, creature); } CreatureAI* ScriptMgr::GetCreatureAI(Creature* creature) { ASSERT(creature); GET_SCRIPT_RET(CreatureScript, creature->GetScriptId(), tmpscript, NULL); return tmpscript->GetAI(creature); } GameObjectAI* ScriptMgr::GetGameObjectAI(GameObject* gameobject) { ASSERT(gameobject); GET_SCRIPT_RET(GameObjectScript, gameobject->GetScriptId(), tmpscript, NULL); return tmpscript->GetAI(gameobject); } void ScriptMgr::OnCreatureUpdate(Creature* creature, uint32 diff) { ASSERT(creature); GET_SCRIPT(CreatureScript, creature->GetScriptId(), tmpscript); tmpscript->OnUpdate(creature, diff); } bool ScriptMgr::OnGossipHello(Player* player, GameObject* go) { ASSERT(player); ASSERT(go); GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnGossipHello(player, go); } bool ScriptMgr::OnGossipSelect(Player* player, GameObject* go, uint32 sender, uint32 action) { ASSERT(player); ASSERT(go); GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, false); return tmpscript->OnGossipSelect(player, go, sender, action); } bool ScriptMgr::OnGossipSelectCode(Player* player, GameObject* go, uint32 sender, uint32 action, const char* code) { ASSERT(player); ASSERT(go); ASSERT(code); GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, false); return tmpscript->OnGossipSelectCode(player, go, sender, action, code); } bool ScriptMgr::OnQuestAccept(Player* player, GameObject* go, Quest const* quest) { ASSERT(player); ASSERT(go); ASSERT(quest); GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestAccept(player, go, quest); } bool ScriptMgr::OnQuestReward(Player* player, GameObject* go, Quest const* quest, uint32 opt) { ASSERT(player); ASSERT(go); ASSERT(quest); GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, false); player->PlayerTalkClass->ClearMenus(); return tmpscript->OnQuestReward(player, go, quest, opt); } uint32 ScriptMgr::GetDialogStatus(Player* player, GameObject* go) { ASSERT(player); ASSERT(go); // TODO: 100 is a funny magic number to have hanging around 
here... GET_SCRIPT_RET(GameObjectScript, go->GetScriptId(), tmpscript, 100); player->PlayerTalkClass->ClearMenus(); return tmpscript->GetDialogStatus(player, go); } void ScriptMgr::OnGameObjectDestroyed(GameObject* go, Player* player) { ASSERT(go); GET_SCRIPT(GameObjectScript, go->GetScriptId(), tmpscript); tmpscript->OnDestroyed(go, player); } void ScriptMgr::OnGameObjectDamaged(GameObject* go, Player* player) { ASSERT(go); GET_SCRIPT(GameObjectScript, go->GetScriptId(), tmpscript); tmpscript->OnDamaged(go, player); } void ScriptMgr::OnGameObjectLootStateChanged(GameObject* go, uint32 state, Unit* unit) { ASSERT(go); GET_SCRIPT(GameObjectScript, go->GetScriptId(), tmpscript); tmpscript->OnLootStateChanged(go, state, unit); } void ScriptMgr::OnGameObjectStateChanged(GameObject* go, uint32 state) { ASSERT(go); GET_SCRIPT(GameObjectScript, go->GetScriptId(), tmpscript); tmpscript->OnGameObjectStateChanged(go, state); } void ScriptMgr::OnGameObjectUpdate(GameObject* go, uint32 diff) { ASSERT(go); GET_SCRIPT(GameObjectScript, go->GetScriptId(), tmpscript); tmpscript->OnUpdate(go, diff); } bool ScriptMgr::OnDummyEffect(Unit* caster, uint32 spellId, SpellEffIndex effIndex, GameObject* target) { ASSERT(caster); ASSERT(target); GET_SCRIPT_RET(GameObjectScript, target->GetScriptId(), tmpscript, false); return tmpscript->OnDummyEffect(caster, spellId, effIndex, target); } bool ScriptMgr::OnAreaTrigger(Player* player, AreaTriggerEntry const* trigger) { ASSERT(player); ASSERT(trigger); GET_SCRIPT_RET(AreaTriggerScript, sObjectMgr->GetAreaTriggerScriptId(trigger->id), tmpscript, false); return tmpscript->OnTrigger(player, trigger); } Battleground* ScriptMgr::CreateBattleground(BattlegroundTypeId /*typeId*/) { // TODO: Implement script-side battlegrounds. ASSERT(false); return NULL; } OutdoorPvP* ScriptMgr::CreateOutdoorPvP(OutdoorPvPData const* data) { ASSERT(data); GET_SCRIPT_RET(OutdoorPvPScript, data->ScriptId, tmpscript, NULL); return tmpscript->GetOutdoorPvP(); } std::vector<ChatCommand*> ScriptMgr::GetChatCommands() { std::vector<ChatCommand*> table; FOR_SCRIPTS_RET(CommandScript, itr, end, table) table.push_back(itr->second->GetCommands()); return table; } void ScriptMgr::OnWeatherChange(Weather* weather, WeatherState state, float grade) { ASSERT(weather); GET_SCRIPT(WeatherScript, weather->GetScriptId(), tmpscript); tmpscript->OnChange(weather, state, grade); } void ScriptMgr::OnWeatherUpdate(Weather* weather, uint32 diff) { ASSERT(weather); GET_SCRIPT(WeatherScript, weather->GetScriptId(), tmpscript); tmpscript->OnUpdate(weather, diff); } void ScriptMgr::OnAuctionAdd(AuctionHouseObject* ah, AuctionEntry* entry) { ASSERT(ah); ASSERT(entry); FOREACH_SCRIPT(AuctionHouseScript)->OnAuctionAdd(ah, entry); } void ScriptMgr::OnAuctionRemove(AuctionHouseObject* ah, AuctionEntry* entry) { ASSERT(ah); ASSERT(entry); FOREACH_SCRIPT(AuctionHouseScript)->OnAuctionRemove(ah, entry); } void ScriptMgr::OnAuctionSuccessful(AuctionHouseObject* ah, AuctionEntry* entry) { ASSERT(ah); ASSERT(entry); FOREACH_SCRIPT(AuctionHouseScript)->OnAuctionSuccessful(ah, entry); } void ScriptMgr::OnAuctionExpire(AuctionHouseObject* ah, AuctionEntry* entry) { ASSERT(ah); ASSERT(entry); FOREACH_SCRIPT(AuctionHouseScript)->OnAuctionExpire(ah, entry); } bool ScriptMgr::OnConditionCheck(Condition* condition, ConditionSourceInfo& sourceInfo) { ASSERT(condition); GET_SCRIPT_RET(ConditionScript, condition->ScriptId, tmpscript, true); return tmpscript->OnConditionCheck(condition, sourceInfo); } void ScriptMgr::OnInstall(Vehicle* veh) { 
ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnInstall(veh); } void ScriptMgr::OnUninstall(Vehicle* veh) { ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnUninstall(veh); } void ScriptMgr::OnReset(Vehicle* veh) { ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnReset(veh); } void ScriptMgr::OnInstallAccessory(Vehicle* veh, Creature* accessory) { ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); ASSERT(accessory); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnInstallAccessory(veh, accessory); } void ScriptMgr::OnAddPassenger(Vehicle* veh, Unit* passenger, int8 seatId) { ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); ASSERT(passenger); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnAddPassenger(veh, passenger, seatId); } void ScriptMgr::OnRemovePassenger(Vehicle* veh, Unit* passenger) { ASSERT(veh); ASSERT(veh->GetBase()->GetTypeId() == TYPEID_UNIT); ASSERT(passenger); GET_SCRIPT(VehicleScript, veh->GetBase()->ToCreature()->GetScriptId(), tmpscript); tmpscript->OnRemovePassenger(veh, passenger); } void ScriptMgr::OnDynamicObjectUpdate(DynamicObject* dynobj, uint32 diff) { ASSERT(dynobj); FOR_SCRIPTS(DynamicObjectScript, itr, end) itr->second->OnUpdate(dynobj, diff); } void ScriptMgr::OnAddPassenger(Transport* transport, Player* player) { ASSERT(transport); ASSERT(player); GET_SCRIPT(TransportScript, transport->GetScriptId(), tmpscript); tmpscript->OnAddPassenger(transport, player); } void ScriptMgr::OnAddCreaturePassenger(Transport* transport, Creature* creature) { ASSERT(transport); ASSERT(creature); GET_SCRIPT(TransportScript, transport->GetScriptId(), tmpscript); tmpscript->OnAddCreaturePassenger(transport, creature); } void ScriptMgr::OnRemovePassenger(Transport* transport, Player* player) { ASSERT(transport); ASSERT(player); GET_SCRIPT(TransportScript, transport->GetScriptId(), tmpscript); tmpscript->OnRemovePassenger(transport, player); } void ScriptMgr::OnTransportUpdate(Transport* transport, uint32 diff) { ASSERT(transport); GET_SCRIPT(TransportScript, transport->GetScriptId(), tmpscript); tmpscript->OnUpdate(transport, diff); } void ScriptMgr::OnRelocate(Transport* transport, uint32 waypointId, uint32 mapId, float x, float y, float z) { GET_SCRIPT(TransportScript, transport->GetScriptId(), tmpscript); tmpscript->OnRelocate(transport, waypointId, mapId, x, y, z); } void ScriptMgr::OnStartup() { FOREACH_SCRIPT(WorldScript)->OnStartup(); } void ScriptMgr::OnShutdown() { FOREACH_SCRIPT(WorldScript)->OnShutdown(); } bool ScriptMgr::OnCriteriaCheck(AchievementCriteriaData const* data, Player* source, Unit* target) { ASSERT(source); // target can be NULL. 
GET_SCRIPT_RET(AchievementCriteriaScript, data->ScriptId, tmpscript, false); return tmpscript->OnCheck(source, target); } // Player void ScriptMgr::OnPVPKill(Player* killer, Player* killed) { FOREACH_SCRIPT(PlayerScript)->OnPVPKill(killer, killed); } void ScriptMgr::OnCreatureKill(Player* killer, Creature* killed) { FOREACH_SCRIPT(PlayerScript)->OnCreatureKill(killer, killed); } void ScriptMgr::OnPlayerKilledByCreature(Creature* killer, Player* killed) { FOREACH_SCRIPT(PlayerScript)->OnPlayerKilledByCreature(killer, killed); } void ScriptMgr::OnPlayerLevelChanged(Player* player, uint8 oldLevel) { FOREACH_SCRIPT(PlayerScript)->OnLevelChanged(player, oldLevel); } void ScriptMgr::OnPlayerFreeTalentPointsChanged(Player* player, uint32 points) { FOREACH_SCRIPT(PlayerScript)->OnFreeTalentPointsChanged(player, points); } void ScriptMgr::OnPlayerTalentsReset(Player* player, bool noCost) { FOREACH_SCRIPT(PlayerScript)->OnTalentsReset(player, noCost); } void ScriptMgr::OnPlayerMoneyChanged(Player* player, int32& amount) { FOREACH_SCRIPT(PlayerScript)->OnMoneyChanged(player, amount); } void ScriptMgr::OnGivePlayerXP(Player* player, uint32& amount, Unit* victim) { FOREACH_SCRIPT(PlayerScript)->OnGiveXP(player, amount, victim); } void ScriptMgr::OnPlayerReputationChange(Player* player, uint32 factionID, int32& standing, bool incremental) { FOREACH_SCRIPT(PlayerScript)->OnReputationChange(player, factionID, standing, incremental); } void ScriptMgr::OnPlayerDuelRequest(Player* target, Player* challenger) { FOREACH_SCRIPT(PlayerScript)->OnDuelRequest(target, challenger); } void ScriptMgr::OnPlayerDuelStart(Player* player1, Player* player2) { FOREACH_SCRIPT(PlayerScript)->OnDuelStart(player1, player2); } void ScriptMgr::OnPlayerDuelEnd(Player* winner, Player* loser, DuelCompleteType type) { FOREACH_SCRIPT(PlayerScript)->OnDuelEnd(winner, loser, type); } void ScriptMgr::OnPlayerChat(Player* player, uint32 type, uint32 lang, std::string& msg) { FOREACH_SCRIPT(PlayerScript)->OnChat(player, type, lang, msg); } void ScriptMgr::OnPlayerChat(Player* player, uint32 type, uint32 lang, std::string& msg, Player* receiver) { FOREACH_SCRIPT(PlayerScript)->OnChat(player, type, lang, msg, receiver); } void ScriptMgr::OnPlayerChat(Player* player, uint32 type, uint32 lang, std::string& msg, Group* group) { FOREACH_SCRIPT(PlayerScript)->OnChat(player, type, lang, msg, group); } void ScriptMgr::OnPlayerChat(Player* player, uint32 type, uint32 lang, std::string& msg, Guild* guild) { FOREACH_SCRIPT(PlayerScript)->OnChat(player, type, lang, msg, guild); } void ScriptMgr::OnPlayerChat(Player* player, uint32 type, uint32 lang, std::string& msg, Channel* channel) { FOREACH_SCRIPT(PlayerScript)->OnChat(player, type, lang, msg, channel); } void ScriptMgr::OnPlayerEmote(Player* player, uint32 emote) { FOREACH_SCRIPT(PlayerScript)->OnEmote(player, emote); } void ScriptMgr::OnPlayerTextEmote(Player* player, uint32 textEmote, uint32 emoteNum, uint64 guid) { FOREACH_SCRIPT(PlayerScript)->OnTextEmote(player, textEmote, emoteNum, guid); } void ScriptMgr::OnPlayerSpellCast(Player* player, Spell* spell, bool skipCheck) { FOREACH_SCRIPT(PlayerScript)->OnSpellCast(player, spell, skipCheck); } void ScriptMgr::OnPlayerLogin(Player* player) { FOREACH_SCRIPT(PlayerScript)->OnLogin(player); } void ScriptMgr::OnPlayerLogout(Player* player) { FOREACH_SCRIPT(PlayerScript)->OnLogout(player); } void ScriptMgr::OnPlayerCreate(Player* player) { FOREACH_SCRIPT(PlayerScript)->OnCreate(player); } void ScriptMgr::OnPlayerDelete(uint64 guid) { 
FOREACH_SCRIPT(PlayerScript)->OnDelete(guid); } void ScriptMgr::OnPlayerBindToInstance(Player* player, Difficulty difficulty, uint32 mapid, bool permanent) { FOREACH_SCRIPT(PlayerScript)->OnBindToInstance(player, difficulty, mapid, permanent); } void ScriptMgr::OnPlayerUpdateZone(Player* player, uint32 newZone, uint32 newArea) { FOREACH_SCRIPT(PlayerScript)->OnUpdateZone(player, newZone, newArea); } // Guild void ScriptMgr::OnGuildAddMember(Guild* guild, Player* player, uint8& plRank) { FOREACH_SCRIPT(GuildScript)->OnAddMember(guild, player, plRank); } void ScriptMgr::OnGuildRemoveMember(Guild* guild, Player* player, bool isDisbanding, bool isKicked) { FOREACH_SCRIPT(GuildScript)->OnRemoveMember(guild, player, isDisbanding, isKicked); } void ScriptMgr::OnGuildMOTDChanged(Guild* guild, const std::string& newMotd) { FOREACH_SCRIPT(GuildScript)->OnMOTDChanged(guild, newMotd); } void ScriptMgr::OnGuildInfoChanged(Guild* guild, const std::string& newInfo) { FOREACH_SCRIPT(GuildScript)->OnInfoChanged(guild, newInfo); } void ScriptMgr::OnGuildCreate(Guild* guild, Player* leader, const std::string& name) { FOREACH_SCRIPT(GuildScript)->OnCreate(guild, leader, name); } void ScriptMgr::OnGuildDisband(Guild* guild) { FOREACH_SCRIPT(GuildScript)->OnDisband(guild); } void ScriptMgr::OnGuildMemberWitdrawMoney(Guild* guild, Player* player, uint32 &amount, bool isRepair) { FOREACH_SCRIPT(GuildScript)->OnMemberWitdrawMoney(guild, player, amount, isRepair); } void ScriptMgr::OnGuildMemberDepositMoney(Guild* guild, Player* player, uint32 &amount) { FOREACH_SCRIPT(GuildScript)->OnMemberDepositMoney(guild, player, amount); } void ScriptMgr::OnGuildItemMove(Guild* guild, Player* player, Item* pItem, bool isSrcBank, uint8 srcContainer, uint8 srcSlotId, bool isDestBank, uint8 destContainer, uint8 destSlotId) { FOREACH_SCRIPT(GuildScript)->OnItemMove(guild, player, pItem, isSrcBank, srcContainer, srcSlotId, isDestBank, destContainer, destSlotId); } void ScriptMgr::OnGuildEvent(Guild* guild, uint8 eventType, uint32 playerGuid1, uint32 playerGuid2, uint8 newRank) { FOREACH_SCRIPT(GuildScript)->OnEvent(guild, eventType, playerGuid1, playerGuid2, newRank); } void ScriptMgr::OnGuildBankEvent(Guild* guild, uint8 eventType, uint8 tabId, uint32 playerGuid, uint32 itemOrMoney, uint16 itemStackCount, uint8 destTabId) { FOREACH_SCRIPT(GuildScript)->OnBankEvent(guild, eventType, tabId, playerGuid, itemOrMoney, itemStackCount, destTabId); } // Group void ScriptMgr::OnGroupAddMember(Group* group, uint64 guid) { ASSERT(group); FOREACH_SCRIPT(GroupScript)->OnAddMember(group, guid); } void ScriptMgr::OnGroupInviteMember(Group* group, uint64 guid) { ASSERT(group); FOREACH_SCRIPT(GroupScript)->OnInviteMember(group, guid); } void ScriptMgr::OnGroupRemoveMember(Group* group, uint64 guid, RemoveMethod method, uint64 kicker, const char* reason) { ASSERT(group); FOREACH_SCRIPT(GroupScript)->OnRemoveMember(group, guid, method, kicker, reason); } void ScriptMgr::OnGroupChangeLeader(Group* group, uint64 newLeaderGuid, uint64 oldLeaderGuid) { ASSERT(group); FOREACH_SCRIPT(GroupScript)->OnChangeLeader(group, newLeaderGuid, oldLeaderGuid); } void ScriptMgr::OnGroupDisband(Group* group) { ASSERT(group); FOREACH_SCRIPT(GroupScript)->OnDisband(group); } SpellScriptLoader::SpellScriptLoader(const char* name) : ScriptObject(name) { ScriptRegistry<SpellScriptLoader>::AddScript(this); } ServerScript::ServerScript(const char* name) : ScriptObject(name) { ScriptRegistry<ServerScript>::AddScript(this); } WorldScript::WorldScript(const char* name) : 
ScriptObject(name) { ScriptRegistry<WorldScript>::AddScript(this); } FormulaScript::FormulaScript(const char* name) : ScriptObject(name) { ScriptRegistry<FormulaScript>::AddScript(this); } WorldMapScript::WorldMapScript(const char* name, uint32 mapId) : ScriptObject(name), MapScript<Map>(mapId) { if (GetEntry() && !GetEntry()->IsWorldMap()) sLog->outError(LOG_FILTER_TSCR, "WorldMapScript for map %u is invalid.", mapId); ScriptRegistry<WorldMapScript>::AddScript(this); } InstanceMapScript::InstanceMapScript(const char* name, uint32 mapId) : ScriptObject(name), MapScript<InstanceMap>(mapId) { if (GetEntry() && !GetEntry()->IsDungeon()) sLog->outError(LOG_FILTER_TSCR, "InstanceMapScript for map %u is invalid.", mapId); ScriptRegistry<InstanceMapScript>::AddScript(this); } BattlegroundMapScript::BattlegroundMapScript(const char* name, uint32 mapId) : ScriptObject(name), MapScript<BattlegroundMap>(mapId) { if (GetEntry() && !GetEntry()->IsBattleground()) sLog->outError(LOG_FILTER_TSCR, "BattlegroundMapScript for map %u is invalid.", mapId); ScriptRegistry<BattlegroundMapScript>::AddScript(this); } ItemScript::ItemScript(const char* name) : ScriptObject(name) { ScriptRegistry<ItemScript>::AddScript(this); } CreatureScript::CreatureScript(const char* name) : ScriptObject(name) { ScriptRegistry<CreatureScript>::AddScript(this); } GameObjectScript::GameObjectScript(const char* name) : ScriptObject(name) { ScriptRegistry<GameObjectScript>::AddScript(this); } AreaTriggerScript::AreaTriggerScript(const char* name) : ScriptObject(name) { ScriptRegistry<AreaTriggerScript>::AddScript(this); } BattlegroundScript::BattlegroundScript(const char* name) : ScriptObject(name) { ScriptRegistry<BattlegroundScript>::AddScript(this); } OutdoorPvPScript::OutdoorPvPScript(const char* name) : ScriptObject(name) { ScriptRegistry<OutdoorPvPScript>::AddScript(this); } CommandScript::CommandScript(const char* name) : ScriptObject(name) { ScriptRegistry<CommandScript>::AddScript(this); } WeatherScript::WeatherScript(const char* name) : ScriptObject(name) { ScriptRegistry<WeatherScript>::AddScript(this); } AuctionHouseScript::AuctionHouseScript(const char* name) : ScriptObject(name) { ScriptRegistry<AuctionHouseScript>::AddScript(this); } ConditionScript::ConditionScript(const char* name) : ScriptObject(name) { ScriptRegistry<ConditionScript>::AddScript(this); } VehicleScript::VehicleScript(const char* name) : ScriptObject(name) { ScriptRegistry<VehicleScript>::AddScript(this); } DynamicObjectScript::DynamicObjectScript(const char* name) : ScriptObject(name) { ScriptRegistry<DynamicObjectScript>::AddScript(this); } TransportScript::TransportScript(const char* name) : ScriptObject(name) { ScriptRegistry<TransportScript>::AddScript(this); } AchievementCriteriaScript::AchievementCriteriaScript(const char* name) : ScriptObject(name) { ScriptRegistry<AchievementCriteriaScript>::AddScript(this); } PlayerScript::PlayerScript(const char* name) : ScriptObject(name) { ScriptRegistry<PlayerScript>::AddScript(this); } GuildScript::GuildScript(const char* name) : ScriptObject(name) { ScriptRegistry<GuildScript>::AddScript(this); } GroupScript::GroupScript(const char* name) : ScriptObject(name) { ScriptRegistry<GroupScript>::AddScript(this); } // Instantiate static members of ScriptRegistry. 
template<class TScript> std::map<uint32, TScript*> ScriptRegistry<TScript>::ScriptPointerList; template<class TScript> uint32 ScriptRegistry<TScript>::_scriptIdCounter = 0; // Specialize for each script type class like so: template class ScriptRegistry<SpellScriptLoader>; template class ScriptRegistry<ServerScript>; template class ScriptRegistry<WorldScript>; template class ScriptRegistry<FormulaScript>; template class ScriptRegistry<WorldMapScript>; template class ScriptRegistry<InstanceMapScript>; template class ScriptRegistry<BattlegroundMapScript>; template class ScriptRegistry<ItemScript>; template class ScriptRegistry<CreatureScript>; template class ScriptRegistry<GameObjectScript>; template class ScriptRegistry<AreaTriggerScript>; template class ScriptRegistry<BattlegroundScript>; template class ScriptRegistry<OutdoorPvPScript>; template class ScriptRegistry<CommandScript>; template class ScriptRegistry<WeatherScript>; template class ScriptRegistry<AuctionHouseScript>; template class ScriptRegistry<ConditionScript>; template class ScriptRegistry<VehicleScript>; template class ScriptRegistry<DynamicObjectScript>; template class ScriptRegistry<TransportScript>; template class ScriptRegistry<AchievementCriteriaScript>; template class ScriptRegistry<PlayerScript>; template class ScriptRegistry<GuildScript>; template class ScriptRegistry<GroupScript>; // Undefine utility macros. #undef GET_SCRIPT_RET #undef GET_SCRIPT #undef FOREACH_SCRIPT #undef FOR_SCRIPTS_RET #undef FOR_SCRIPTS #undef SCR_REG_LST #undef SCR_REG_ITR #undef SCR_REG_MAP<|fim▁end|>
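The ScriptMgr row above combines two patterns: per-type script registries that every engine hook is dispatched through (the FOREACH_SCRIPT / GET_SCRIPT macros), and FillSpellSummary, which folds each spell's effect targets into small bitmasks for later AI queries. A minimal Python sketch of that registry-plus-bitmask idea follows; the class, enum values, and target names are illustrative, not TrinityCore's API.

    # Sketch only: mirrors the registry dispatch and bitmask classification above.
    SELECT_TARGET_SELF = 1            # illustrative enum values
    SELECT_TARGET_SINGLE_ENEMY = 2

    class ScriptRegistry:
        def __init__(self):
            self._scripts = {}        # script id -> script object
            self._next_id = 0

        def add(self, script):
            self._next_id += 1
            self._scripts[self._next_id] = script

        def for_each(self, hook, *args):
            # FOREACH_SCRIPT: invoke the same hook on every registered script
            for script in self._scripts.values():
                getattr(script, hook)(*args)

    def classify_targets(effect_targets):
        mask = 0
        for target in effect_targets:
            if target == "TARGET_UNIT_CASTER":
                mask |= 1 << (SELECT_TARGET_SELF - 1)
            if target in ("TARGET_UNIT_TARGET_ENEMY", "TARGET_DEST_TARGET_ENEMY"):
                mask |= 1 << (SELECT_TARGET_SINGLE_ENEMY - 1)
        return mask

    assert classify_targets(["TARGET_UNIT_CASTER"]) == 0b01

Keeping one registry per script type, as the C++ does with one template instantiation per class, is what lets Unload() clear every category with a single macro expansion.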
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>VERSION = (0, 6, 0)<|fim▁hole|><|fim▁end|>
__version__ = '.'.join((str(x) for x in VERSION))
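This row is the simplest complete fill-in-the-middle example in the dump: the prompt carries a prefix, the hole marker, and an empty suffix, and the completion supplies the missing line, which builds the dotted string '0.6.0' from the version tuple. A sketch of splicing any row in this dump back into source text, assuming the marker strings used above:

    PRE, HOLE, END = "<|fim▁begin|>", "<|fim▁hole|>", "<|fim▁end|>"

    def reassemble(prompt: str, completion: str) -> str:
        body = prompt.split("<|end_file_name|>", 1)[1]     # drop the filename header
        body = body.removeprefix(PRE).removesuffix(END)    # Python 3.9+ string methods
        prefix, suffix = body.split(HOLE, 1)
        return prefix + completion + suffix                # original source, modulo lost newlines

For this row, '.'.join(str(x) for x in (0, 6, 0)) evaluates to '0.6.0'.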
<|file_name|>stock_server.rs<|end_file_name|><|fim▁begin|>mod async_helpers; <|fim▁hole|>use std::time::Duration; use zeromq::*; #[async_helpers::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { let mut rng = rand::thread_rng(); let stocks: Vec<&str> = vec!["AAA", "ABB", "BBB"]; println!("Starting server"); let mut socket = zeromq::PubSocket::new(); socket.bind("tcp://127.0.0.1:5556").await?; println!("Start sending loop"); loop { for stock in &stocks { let price: u32 = rng.gen_range(1, 100); let mut m: ZmqMessage = ZmqMessage::from(*stock); m.push_back(price.to_ne_bytes().to_vec().into()); dbg!(m.clone()); socket.send(m).await?; } async_helpers::sleep(Duration::from_secs(1)).await; } }<|fim▁end|>
use rand::Rng;
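The Rust publisher above sends a two-frame message per quote, a topic frame holding the ticker and a frame holding the price as native-endian bytes, once per second. A rough pyzmq equivalent for comparison (pyzmq API, not the Rust zeromq crate; the port and tickers come from the row):

    import random, struct, time
    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUB)
    sock.bind("tcp://127.0.0.1:5556")
    while True:
        for stock in ("AAA", "ABB", "BBB"):
            price = random.randint(1, 99)   # gen_range(1, 100) excludes the upper bound
            # topic frame first, then a native-endian u32 price frame (to_ne_bytes above)
            sock.send_multipart([stock.encode(), struct.pack("=I", price)])
        time.sleep(1)

A subscriber would connect to the same endpoint and set a SUB filter on the ticker prefix to receive only selected stocks.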
<|file_name|>0011_auto_20201109_1100.py<|end_file_name|><|fim▁begin|># Generated by Django 2.2.11 on 2020-11-09 17:00 import daphne_context.utils from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('daphne_context', '0010_userinformation_mycroft_connection'), ]<|fim▁hole|> migrations.RemoveField( model_name='userinformation', name='mycroft_session', ), migrations.CreateModel( name='MycroftUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('mycroft_session', models.CharField(default=daphne_context.utils.generate_mycroft_session, max_length=9)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]<|fim▁end|>
operations = [
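The RemoveField/CreateModel pair in this migration moves the Mycroft session key off UserInformation and into a dedicated one-to-one model. Reconstructed from the operations alone (not taken from the app's models.py), the resulting model is roughly:

    from django.conf import settings
    from django.db import models
    import daphne_context.utils

    class MycroftUser(models.Model):
        # 9-character session key, generated fresh per row
        mycroft_session = models.CharField(
            max_length=9, default=daphne_context.utils.generate_mycroft_session)
        # exactly one MycroftUser per auth user
        user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                    on_delete=models.CASCADE)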
<|file_name|>Types.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # This file is part of nexdatas - Tango Server for NeXus data writer # # Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]> # # nexdatas is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # nexdatas is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with nexdatas. If not, see <http://www.gnu.org/licenses/>. # """ Types converters """ import numpy import sys if sys.version_info > (3,): long = int def nptype(dtype): """ converts to numpy types :param dtype: h5 writer type type :type dtype: :obj:`str` :returns: nupy type :rtype: :obj:`str` """ if str(dtype) in ['string', b'string']: return 'str' return dtype class Converters(object): """ set of converters """ @classmethod def toBool(cls, value): """ converts to bool :param value: variable to convert :type value: any :returns: result in bool type :rtype: :obj:`bool` """ if type(value).__name__ == 'str' or type(value).__name__ == 'unicode': lvalue = value.strip().lower() if lvalue == 'false' or lvalue == '0': return False else: return True elif value: return True return False class NTP(object): """ type converter """ #: (:obj:`dict` <:obj:`str` ,:obj:`str` >) map of Python:Tango types pTt = {"long": "DevLong64", "str": "DevString", "unicode": "DevString", "bool": "DevBoolean", "int": "DevLong64", "int64": "DevLong64", "int32": "DevLong", "int16": "DevShort", "int8": "DevUChar", "uint": "DevULong64", "uint64": "DevULong64", "uint32": "DevULong", "uint16": "DevUShort", "uint8": "DevUChar", "float": "DevDouble", "float64": "DevDouble", "float32": "DevFloat", "float16": "DevFloat", "string": "DevString", "str": "DevString"} #: (:obj:`dict` <:obj:`str` , :obj:`str` >) map of NEXUS : numpy types nTnp = {"NX_FLOAT32": "float32", "NX_FLOAT64": "float64", "NX_FLOAT": "float64", "NX_NUMBER": "float64", "NX_INT": "int64", "NX_INT64": "int64", "NX_INT32": "int32", "NX_INT16": "int16", "NX_INT8": "int8", "NX_UINT64": "uint64", "NX_UINT32": "uint32", "NX_UINT16": "uint16", "NX_UINT8": "uint8", "NX_UINT": "uint64", "NX_POSINT": "uint64", "NX_DATE_TIME": "string", "ISO8601": "string", "NX_CHAR": "string", "NX_BOOLEAN": "bool"} #: (:obj:`dict` <:obj:`str` , :obj:`type` or :obj:`types.MethodType` >) \ #: map of type : converting function convert = {"float16": float, "float32": float, "float64": float, "float": float, "int64": long, "int32": int, "int16": int, "int8": int, "int": int, "uint64": long, "uint32": long, "uint16": int, "uint8": int, "uint": int, "string": str, "str": str, "bool": Converters.toBool} <|fim▁hole|> #: (:obj:`dict` <:obj:`str` , :obj:`str` >) map of tag attribute types aTn = {"signal": "NX_INT", "axis": "NX_INT", "primary": "NX_INT32", "offset": "NX_INT", "stride": "NX_INT", "file_time": "NX_DATE_TIME", "file_update_time": "NX_DATE_TIME", "restricts": "NX_INT", "ignoreExtraGroups": "NX_BOOLEAN", "ignoreExtraFields": "NX_BOOLEAN", "ignoreExtraAttributes": "NX_BOOLEAN", "minOccus": "NX_INT", "maxOccus": "NX_INT"} #: (:obj:`dict` <:obj:`str` , :obj:`str` >) \ #: map of vector tag attribute types aTnv = {"vector": "NX_FLOAT"} #: 
(:obj:`dict` <:obj:`int` , :obj:`str` >) map of rank : data format rTf = {0: "SCALAR", 1: "SPECTRUM", 2: "IMAGE", 3: "VERTEX"} def arrayRank(self, array): """ array rank :brief: It calculates the rank of the array :param array: given array :type array: any :returns: rank :rtype: :obj:`int` """ rank = 0 if hasattr(array, "__iter__") and not \ isinstance(array, (str, bytes)): try: rank = 1 + self.arrayRank(array[0]) except IndexError: if hasattr(array, "shape") and len(array.shape) == 0: rank = 0 else: rank = 1 return rank def arrayRankRShape(self, array): """ array rank, inverse shape and type :brief: It calculates the rank, inverse shape and type of the first element of the list array :param array: given array :type array: any :returns: (rank, inverse shape, type) :rtype: (:obj:`int` , :obj:`list` <:obj:`int` > , :obj:`str` ) """ rank = 0 shape = [] pythonDType = None if hasattr(array, "__iter__") and not \ isinstance(array, (str, bytes)): try: rank, shape, pythonDType = self.arrayRankRShape(array[0]) rank += 1 shape.append(len(array)) except IndexError: if hasattr(array, "shape") and len(array.shape) == 0: rank = 0 if type(array) in [numpy.string_, numpy.str_]: pythonDType = "str" elif hasattr(array, "dtype"): pythonDType = str(array.dtype) else: pythonDType = type(array.tolist()).__name__ else: rank = 1 shape.append(len(array)) else: if type(array) in [numpy.string_, numpy.str_]: pythonDType = "str" elif hasattr(array, "dtype"): pythonDType = str(array.dtype) elif hasattr(array, "tolist"): pythonDType = type(array.tolist()).__name__ else: pythonDType = type(array).__name__ return (rank, shape, pythonDType) def arrayRankShape(self, array): """ array rank, shape and type :brief: It calculates the rank, shape and type of the first element of the list array :param array: given array :type array: any :returns: (rank, shape, type) :rtype: (:obj:`int` , :obj:`list` <:obj:`int` > , :obj:`str` ) """ rank, shape, pythonDType = self.arrayRankRShape(array) if shape: shape.reverse() return (rank, shape, pythonDType) def createArray(self, value, fun=None): """ creates python array from the given array with applied the given function to it elements :param value: given array :type array: any :param fun: applied function :type fun: :obj:`type` or :obj:`types.MethodType` :returns: created array :rtype: :obj:`list` <any> """ if not hasattr(value, "__iter__") or \ isinstance(value, (str, bytes)): return fun(value) if fun else value else: return [self.createArray(v, fun) for v in value]<|fim▁end|>
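NTP.arrayRankRShape above recurses into element 0 of each nesting level, counting depth and collecting lengths innermost-first, which is why arrayRankShape has to reverse the shape before returning it. A worked example, assuming Types.py is importable:

    data = [[1, 2, 3], [4, 5, 6]]                  # 2 rows x 3 columns
    ntp = NTP()
    rank, rshape, dtype = ntp.arrayRankRShape(data)
    assert (rank, rshape, dtype) == (2, [3, 2], 'int')   # shape comes back reversed
    rank, shape, dtype = ntp.arrayRankShape(data)
    assert shape == [2, 3]                         # reversed into row-major order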
<|file_name|>Treatment.java<|end_file_name|><|fim▁begin|>/* * Copyright (c) 2019 The Hyve B.V. * * This library is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS * FOR A PARTICULAR PURPOSE. The software and documentation provided hereunder * is on an "as is" basis, and Memorial Sloan-Kettering Cancer Center has no * obligations to provide maintenance, support, updates, enhancements or * modifications. In no event shall Memorial Sloan-Kettering Cancer Center be * liable to any party for direct, indirect, special, incidental or * consequential damages, including lost profits, arising out of the use of this * software and its documentation, even if Memorial Sloan-Kettering Cancer * Center has been advised of the possibility of such damage. */ /* * This file is part of cBioPortal. * * cBioPortal is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.mskcc.cbio.portal.model; import java.io.Serializable; /** * @author Pim van Nierop, [email protected] */ public class Treatment implements Serializable { private int id; private int geneticEntityId; private String stableId; private String name; private String description; private String refLink; /** * Create a Treatment object from fields * * @param id Treatment table ID key of the treatment * @param geneticEntityId Genetic_entity table ID key associated to the treatment record * @param stableId Stable identifier of the treatment used in the cBioPortal instance * @param name Name of the treatment * @param description Description of the treatment * @param refLink Url for the treatment */ public Treatment(int id, int geneticEntityId, String stableId, String name, String description, String refLink) { this.id = id; this.geneticEntityId = geneticEntityId; this.stableId = stableId; this.name = name; this.description = description; this.refLink = refLink; } /** * Create a Treatment object from fields * * @param stableId Stable identifier of the treatment used in the cBioPortal instance * @param name Name of the treatment * @param description Description of the treatment * @param refLink Url for the treatment */ public Treatment(String stableId, String name, String description, String refLink) { this.stableId = stableId; this.name = name; this.description = description; this.refLink = refLink; } /**<|fim▁hole|> * @return the id */ public int getId() { return id; } /** * @param id the id to set */ public void setId(Integer id) { this.id = id; } /** * @return the geneticEntityId */ public int getGeneticEntityId() { return geneticEntityId; } /** * @param geneticEntityId the geneticEntityId to set */ public void setGeneticEntityId(Integer geneticEntityId) { this.geneticEntityId = geneticEntityId; } /** * @return the stableId */ public String 
getStableId() { return stableId; } /** * @param stableId the stableId to set */ public void setStableId(String stableId) { this.stableId = stableId; } /** * @return the name */ public String getName() { return name; } /** * @param name the name to set */ public void setName(String name) { this.name = name; } /** * @return the description */ public String getDescription() { return description; } /** * @param description the description to set */ public void setDescription(String description) { this.description = description; } /** * @return the refLink */ public String getRefLink() { return refLink; } /** * @param refLink the refLink to set */ public void setRefLink(String refLink) { this.refLink = refLink; } }<|fim▁end|>
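The bean above carries one treatment row plus its genetic_entity foreign key; the four-argument constructor serves records that have not been persisted yet, so both database keys stay unset until the DAO fills them in. The same shape as a Python dataclass, for comparison (field names transliterated here, not part of cBioPortal):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Treatment:
        stable_id: str
        name: str
        description: str
        ref_link: str
        id: Optional[int] = None                  # assigned by the treatment table
        genetic_entity_id: Optional[int] = None   # assigned by genetic_entity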
<|file_name|>handlers.py<|end_file_name|><|fim▁begin|>import peewee import tornado.web import tornado.gen from windseed.settings import env, db from windseed.base import handler from windseed.apps.admin import urls from windseed.apps.admin.models import User from windseed.apps.web.models import Record class Handler(handler.Handler): def get_current_user(self): """ Current user Do not use this if you have many requests within this application since each request will hit DB, use redis to store intermediate results """ email = self.get_secure_cookie('user') if email: try: user = User.get(User.email == email) except User.DoesNotExist: user = None if user: if user.active and user.superuser: return email else: return None else: return None else: return None def write_error(self, status_code, **kwargs): self.render('admin/error.html', status_code=status_code) def authenticated(func): """ Execute target function if authenticated, redirect to login page otherwise """ def decorated(self, *args, **kwargs): if not self.get_current_user(): self.redirect(urls.login) else: return func(self, *args, **kwargs) return decorated def unauthenticated(func): """ Execute target function if not authenticated, redirect to dashboard otherwise """ def decorated(self, *args, **kwargs): if self.get_current_user(): self.redirect(urls.dashboard) else: return func(self, *args, **kwargs) return decorated class LoginHandler(Handler): """ Login: /admin/login/ """ @tornado.web.addslash @tornado.gen.coroutine @unauthenticated def get(self): """ Render login page """ self.render('admin/login.html') @tornado.gen.coroutine @unauthenticated def post(self): """ Process login form and authenticate user if credentials are valid, redirect back to login page otherwise """ email = self.get_argument('email') password = self.get_argument('password') try: user = User.get(User.email == email) except User.DoesNotExist: user = None if user: if user.active and user.superuser and \ user.check_password(password=password): self.set_secure_cookie('user', user.email) self.redirect(urls.dashboard) else: self.redirect(urls.login) else: self.redirect(urls.login) class LogoutHandler(Handler): """ Logout: /admin/logout/ """ @tornado.web.addslash @tornado.gen.coroutine @authenticated def get(self): """ Clear user authentication and redirect to login page """ self.clear_cookie('user') self.redirect(urls.login) class DashboardHandler(Handler): """ Dashboard: /admin/ """ @tornado.web.addslash @tornado.gen.coroutine @authenticated def get(self): """ Render dashboard """ self.render('admin/dashboard.html') class RecordsHandler(Handler): """ Records: /admin/records/ """ def get_page_context(self): """ Return current page context """ try: page = int(self.get_argument('page', 1)) except ValueError: page = 1 try: count = peewee.SelectQuery(Record).count() except peewee.IntegrityError: count = 0 page_count = int(count/env.ADMIN_ITEMS_PER_PAGE) + \ int(bool(count % env.ADMIN_ITEMS_PER_PAGE)) prev_page, page, next_page = self.paging(page, page_count) try: records = Record\ .select()\ .order_by( Record.active.desc(), Record.uts.desc())\ .paginate(page, paginate_by=env.ADMIN_ITEMS_PER_PAGE) except peewee.IntegrityError: records = [] return dict(records=records, count=count, page_count=page_count, prev_page=prev_page, page=page, next_page=next_page) def ajax_page(self, status): """ Return current page """ record_list = tornado.escape.to_basestring( self.render_string( 'admin/partials/_record_list.html', **self.get_page_context())) self.write(dict(status=status, 
record_list=record_list)) def ajax_empty(self, status): """ Return empty response """ self.write(dict(status=status)) @tornado.web.addslash @tornado.gen.coroutine @authenticated def get(self): """ Render records """ self.render( 'admin/records.html', **self.get_page_context()) @tornado.gen.coroutine @authenticated def post(self): """ Create, update or delete a record """ create = self.get_argument('create', None) update = self.get_argument('update', None) delete = self.get_argument('delete', None) uid = self.get_argument('uid', None) active = self.get_argument('active', None) active = True if active is not None else False name = self.get_argument('name', None) if name is not None: name = name.strip() if not name: name = None description = self.get_argument('description', None) if create is not None and \ active is not None and \ name is not None: try: with db.pool.atomic(): created = Record.create( active=active,<|fim▁hole|> except peewee.IntegrityError: created = None if created: self.ajax_page('create') else: self.ajax_empty('not_created') elif update is not None and \ uid is not None and \ active is not None and \ name is not None: try: with db.pool.atomic(): updated = Record\ .update( active=active, name=name, description=description)\ .where(Record.uid == uid)\ .execute() except peewee.IntegrityError: updated = None if updated: self.ajax_page('update') else: self.ajax_empty('not_updated') elif delete is not None and \ uid is not None: try: with db.pool.atomic(): deleted = Record\ .delete()\ .where(Record.uid == uid)\ .execute() except peewee.IntegrityError: deleted = None if deleted: self.ajax_page('delete') else: self.ajax_empty('not_deleted') else: self.ajax_empty('not_command')<|fim▁end|>
name=name, description=description)
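get_page_context above computes the page count as a truncated quotient plus one extra page whenever there is a remainder. Checked by hand with an assumed ADMIN_ITEMS_PER_PAGE of 10:

    per_page = 10
    for count, expected in [(0, 0), (9, 1), (10, 1), (11, 2)]:
        page_count = int(count / per_page) + int(bool(count % per_page))
        assert page_count == expected

The authenticated/unauthenticated decorators are the other half of the pattern: they redirect instead of calling the wrapped method, so an unauthorised request never reaches the handler body at all.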
<|file_name|>Sensor.hpp<|end_file_name|><|fim▁begin|>/// This is the sensor class /// /// Sensor is a box2d fixture that is attached to a parent body /// Sensors are used to detect entities in an area. #pragma once #include <AFP/Scene/SceneNode.hpp> #include <AFP/Entity/Entity.hpp> #include <AFP/Entity/Character.hpp> namespace AFP { class Sensor : public SceneNode { public: enum Type { Foot, Surround, Vision, Jump }; /// Constructor /// /// Sensor(Entity* parent, Type type); /// Return sensor category /// /// Returns the sensor category based on the type virtual unsigned int getCategory() const; /// Create foot sensor /// /// Creates a foot sensor on feet void createFootSensor(float sizeX, float sizeY); /// Create vision sensor ///<|fim▁hole|> ///Takes radius in meters and the angle in degrees as parameters void createVisionSensor(float radius, float angle); /// Create surround sensor /// /// Creates a foot sensor on feet void createSurroundSensor(float radius); /// Create foot sensor /// /// Creates a foot sensor on feet void createJumpSensor(float sizeX, float sizeY); /// Begin contact /// /// Begin contact with an entity void beginContact(); /// Begin contact /// /// Begin contact with an character void beginContact(Character& character); /// End contact /// /// End contact with an entity void endContact(); /// End contact /// /// End contact with a character void endContact(Character& character); private: /// Update /// /// Update sensor data. virtual void updateCurrent(sf::Time dt, CommandQueue& commands); private: /// Sensor fixture /// /// Sensors fixture is linked to the body of the parent. b2Fixture* mFixture; /// Parent entity /// /// Entity on which the sensor is attached to Entity* mParent; /// Type /// /// Type of the sensor Type mType; }; }<|fim▁end|>
/// Creates a vision sensor for the entity.
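A Box2D sensor is an ordinary fixture with its isSensor flag set, so it reports begin/end contacts without producing a collision response. A rough pybox2d analogue of createFootSensor (pybox2d names; the shape size, offset below the body, and the parent-body wiring are all assumptions):

    from Box2D import b2FixtureDef, b2PolygonShape

    def create_foot_sensor(body, size_x, size_y):
        # small box hanging under the body; isSensor=True disables collision response
        shape = b2PolygonShape(box=(size_x, size_y, (0.0, -size_y), 0.0))
        return body.CreateFixture(b2FixtureDef(shape=shape, isSensor=True))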
<|file_name|>loginoption.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Calendar-Indicator # # Copyright (C) 2011-2019 Lorenzo Carbonell Cerezo # [email protected] # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import gi try: gi.require_version('Gtk', '3.0') gi.require_version('Handy', '0.0') except Exception as e: print(e) exit(1) from gi.repository import Gtk from gi.repository import Handy import os import urllib import comun from comun import _ from sidewidget import SideWidget from settingsrow import SettingRow from logindialog import LoginDialog from googlecalendarapi import GoogleCalendar class LoginOption(Gtk.Overlay): def __init__(self): Gtk.Overlay.__init__(self) self.__set_ui() def __set_ui(self): handycolumn = Handy.Column() handycolumn.set_maximum_width(700) handycolumn.set_margin_top(24) self.add(handycolumn) box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 5) handycolumn.add(box) label = Gtk.Label(_('Google calendar permissions')) label.set_name('special') label.set_alignment(0, 0.5) box.add(label) listbox0 = Gtk.ListBox() box.add(listbox0) self.switch1 = Gtk.Switch() self.switch1.connect('button-press-event',self.on_switch1_changed) self.switch1.connect('activate',self.on_switch1_changed) self.switch1.set_valign(Gtk.Align.CENTER) listbox0.add(SettingRow(_('Permissions for Google Calendar'), _('Enable read and write permissions for Google Calendar.'), self.switch1)) self.switch1.set_active(os.path.exists(comun.TOKEN_FILE)) def on_switch1_changed(self,widget,data): if self.switch1.get_active(): if os.path.exists(comun.TOKEN_FILE): os.remove(comun.TOKEN_FILE) else: googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE) if googlecalendar.do_refresh_authorization() is None: authorize_url = googlecalendar.get_authorize_url() ld = LoginDialog(authorize_url) ld.run() googlecalendar.get_authorization(ld.code) ld.destroy() if googlecalendar.do_refresh_authorization() is None: md = Gtk.MessageDialog( parent = self, flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT, type = Gtk.MessageType.ERROR, buttons = Gtk.ButtonsType.OK_CANCEL, message_format = _('You have to authorize Calendar-Indicator to use it, do you want to authorize?')) if md.run() == Gtk.ResponseType.CANCEL: exit(3)<|fim▁hole|> if googlecalendar.do_refresh_authorization() is None: exit(3) self.switch1.set_active(True)<|fim▁end|>
else:
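The switch handler above runs a classic installed-app OAuth exchange: try the stored refresh token first, and only if that fails send the user to the authorize URL and trade the pasted code for tokens. The control flow reduced to its steps (GoogleCalendar method names come from the row; ask_user_for_code is a hypothetical stand-in for the LoginDialog):

    calendar = GoogleCalendar(token_file=comun.TOKEN_FILE)
    if calendar.do_refresh_authorization() is None:   # no usable stored token
        url = calendar.get_authorize_url()            # user grants access in a browser
        code = ask_user_for_code(url)                 # hypothetical UI step
        calendar.get_authorization(code)              # exchange the code for tokens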
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>from django.core.cache import cache<|fim▁hole|> cache.clear()<|fim▁end|>
def pytest_runtest_setup(item): # Clear the cache before every test
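pytest_runtest_setup is a collection-wide hook, so this conftest clears the Django cache before every single test. The same isolation can be written as an autouse fixture, which is the more common spelling today:

    import pytest
    from django.core.cache import cache

    @pytest.fixture(autouse=True)
    def _clear_cache():
        cache.clear()   # runs before each test in scope
        yield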
<|file_name|>NaccacheSternTest.java<|end_file_name|><|fim▁begin|>package org.bouncycastle.crypto.test; import java.math.BigInteger; import java.security.SecureRandom; import java.util.Vector; import org.bouncycastle.crypto.AsymmetricCipherKeyPair; import org.bouncycastle.crypto.InvalidCipherTextException; import org.bouncycastle.crypto.engines.NaccacheSternEngine; import org.bouncycastle.crypto.generators.NaccacheSternKeyPairGenerator; import org.bouncycastle.crypto.params.NaccacheSternKeyGenerationParameters; import org.bouncycastle.crypto.params.NaccacheSternKeyParameters; import org.bouncycastle.crypto.params.NaccacheSternPrivateKeyParameters; import org.bouncycastle.util.encoders.Hex; import org.bouncycastle.util.test.SimpleTest; /** * Test case for NaccacheStern cipher. For details on this cipher, please see * * http://www.gemplus.com/smart/rd/publications/pdf/NS98pkcs.pdf * * Performs the following tests: * <ul> * <li> Toy example from the NaccacheSternPaper </li> * <li> 768 bit test with text "Now is the time for all good men." (ripped from RSA test) and * the same test with the first byte replaced by 0xFF </li> * <li> 1024 bit test analog to 768 bit test </li> * </ul> */ public class NaccacheSternTest extends SimpleTest { static final boolean debug = false; static final NaccacheSternEngine cryptEng = new NaccacheSternEngine(); static final NaccacheSternEngine decryptEng = new NaccacheSternEngine(); static { cryptEng.setDebug(debug); decryptEng.setDebug(debug); } // Values from NaccacheStern paper static final BigInteger a = BigInteger.valueOf(101); static final BigInteger u1 = BigInteger.valueOf(3); static final BigInteger u2 = BigInteger.valueOf(5); static final BigInteger u3 = BigInteger.valueOf(7); static final BigInteger b = BigInteger.valueOf(191); static final BigInteger v1 = BigInteger.valueOf(11); static final BigInteger v2 = BigInteger.valueOf(13); static final BigInteger v3 = BigInteger.valueOf(17); static final BigInteger ONE = BigInteger.valueOf(1); static final BigInteger TWO = BigInteger.valueOf(2); static final BigInteger sigma = u1.multiply(u2).multiply(u3).multiply(v1) .multiply(v2).multiply(v3); static final BigInteger p = TWO.multiply(a).multiply(u1).multiply(u2) .multiply(u3).add(ONE); static final BigInteger q = TWO.multiply(b).multiply(v1).multiply(v2) .multiply(v3).add(ONE); static final BigInteger n = p.multiply(q); static final BigInteger phi_n = p.subtract(ONE).multiply(q.subtract(ONE)); <|fim▁hole|> static final Vector smallPrimes = new Vector(); // static final BigInteger paperTest = BigInteger.valueOf(202); static final String input = "4e6f77206973207468652074696d6520666f7220616c6c20676f6f64206d656e"; static final BigInteger paperTest = BigInteger.valueOf(202); // // to check that we handling byte extension by big number correctly. 
// static final String edgeInput = "ff6f77206973207468652074696d6520666f7220616c6c20676f6f64206d656e"; public String getName() { return "NaccacheStern"; } public void performTest() { // Test with given key from NaccacheSternPaper (totally insecure) // First the Parameters from the NaccacheStern Paper // (see http://www.gemplus.com/smart/rd/publications/pdf/NS98pkcs.pdf ) smallPrimes.addElement(u1); smallPrimes.addElement(u2); smallPrimes.addElement(u3); smallPrimes.addElement(v1); smallPrimes.addElement(v2); smallPrimes.addElement(v3); NaccacheSternKeyParameters pubParameters = new NaccacheSternKeyParameters(false, g, n, sigma.bitLength()); NaccacheSternPrivateKeyParameters privParameters = new NaccacheSternPrivateKeyParameters(g, n, sigma.bitLength(), smallPrimes, phi_n); AsymmetricCipherKeyPair pair = new AsymmetricCipherKeyPair(pubParameters, privParameters); // Initialize Engines with KeyPair if (debug) { System.out.println("initializing encryption engine"); } cryptEng.init(true, pair.getPublic()); if (debug) { System.out.println("initializing decryption engine"); } decryptEng.init(false, pair.getPrivate()); byte[] data = paperTest.toByteArray(); if (!new BigInteger(data).equals(new BigInteger(enDeCrypt(data)))) { fail("failed NaccacheStern paper test"); } // // key generation test // // // 768 Bit test // if (debug) { System.out.println(); System.out.println("768 Bit TEST"); } // specify key generation parameters NaccacheSternKeyGenerationParameters genParam = new NaccacheSternKeyGenerationParameters(new SecureRandom(), 768, 8, 30, debug); // Initialize Key generator and generate key pair NaccacheSternKeyPairGenerator pGen = new NaccacheSternKeyPairGenerator(); pGen.init(genParam); pair = pGen.generateKeyPair(); if (((NaccacheSternKeyParameters)pair.getPublic()).getModulus().bitLength() < 768) { System.out.println("FAILED: key size is <786 bit, exactly " + ((NaccacheSternKeyParameters)pair.getPublic()).getModulus().bitLength() + " bit"); fail("failed key generation (768) length test"); } // Initialize Engines with KeyPair if (debug) { System.out.println("initializing " + genParam.getStrength() + " bit encryption engine"); } cryptEng.init(true, pair.getPublic()); if (debug) { System.out.println("initializing " + genParam.getStrength() + " bit decryption engine"); } decryptEng.init(false, pair.getPrivate()); // Basic data input data = Hex.decode(input); if (!new BigInteger(1, data).equals(new BigInteger(1, enDeCrypt(data)))) { fail("failed encryption decryption (" + genParam.getStrength() + ") basic test"); } // Data starting with FF byte (would be interpreted as negative // BigInteger) data = Hex.decode(edgeInput); if (!new BigInteger(1, data).equals(new BigInteger(1, enDeCrypt(data)))) { fail("failed encryption decryption (" + genParam.getStrength() + ") edgeInput test"); } // // 1024 Bit Test // /* if (debug) { System.out.println(); System.out.println("1024 Bit TEST"); } // specify key generation parameters genParam = new NaccacheSternKeyGenerationParameters(new SecureRandom(), 1024, 8, 40); pGen.init(genParam); pair = pGen.generateKeyPair(); if (((NaccacheSternKeyParameters)pair.getPublic()).getModulus().bitLength() < 1024) { if (debug) { System.out.println("FAILED: key size is <1024 bit, exactly " + ((NaccacheSternKeyParameters)pair.getPublic()).getModulus().bitLength() + " bit"); } fail("failed key generation (1024) length test"); } // Initialize Engines with KeyPair if (debug) { System.out.println("initializing " + genParam.getStrength() + " bit encryption engine"); } 
cryptEng.init(true, pair.getPublic()); if (debug) { System.out.println("initializing " + genParam.getStrength() + " bit decryption engine"); } decryptEng.init(false, pair.getPrivate()); if (debug) { System.out.println("Data is " + new BigInteger(1, data)); } // Basic data input data = Hex.decode(input); if (!new BigInteger(1, data).equals(new BigInteger(1, enDeCrypt(data)))) { fail("failed encryption decryption (" + genParam.getStrength() + ") basic test"); } // Data starting with FF byte (would be interpreted as negative // BigInteger) data = Hex.decode(edgeInput); if (!new BigInteger(1, data).equals(new BigInteger(1, enDeCrypt(data)))) { fail("failed encryption decryption (" + genParam.getStrength() + ") edgeInput test"); } */ // END OF TEST CASE try { new NaccacheSternEngine().processBlock(new byte[]{ 1 }, 0, 1); fail("failed initialisation check"); } catch (IllegalStateException e) { // expected } catch (InvalidCipherTextException e) { fail("failed initialisation check"); } if (debug) { System.out.println("All tests successful"); } } private byte[] enDeCrypt(byte[] input) { // create work array byte[] data = new byte[input.length]; System.arraycopy(input, 0, data, 0, data.length); // Perform encryption like in the paper from Naccache-Stern if (debug) { System.out.println("encrypting data. Data representation\n" // + "As String:.... " + new String(data) + "\n" + "As BigInteger: " + new BigInteger(1, data)); System.out.println("data length is " + data.length); } try { data = cryptEng.processData(data); } catch (InvalidCipherTextException e) { if (debug) { System.out.println("failed - exception " + e.toString() + "\n" + e.getMessage()); } fail("failed - exception " + e.toString() + "\n" + e.getMessage()); } if (debug) { System.out.println("enrypted data representation\n" // + "As String:.... " + new String(data) + "\n" + "As BigInteger: " + new BigInteger(1, data)); System.out.println("data length is " + data.length); } try { data = decryptEng.processData(data); } catch (InvalidCipherTextException e) { if (debug) { System.out.println("failed - exception " + e.toString() + "\n" + e.getMessage()); } fail("failed - exception " + e.toString() + "\n" + e.getMessage()); } if (debug) { System.out.println("decrypted data representation\n" // + "As String:.... " + new String(data) + "\n" + "As BigInteger: " + new BigInteger(1, data)); System.out.println("data length is " + data.length); } return data; } public static void main(String[] args) { runTest(new NaccacheSternTest()); } }<|fim▁end|>
static final BigInteger g = BigInteger.valueOf(131);
<|file_name|>node.go<|end_file_name|><|fim▁begin|>package wikimediaparser import ( "fmt" "github.com/golang/glog" "strings" ) // Node as it is emitted by the parser // - contains a NodeType for clear identification // - a string val Val // - a list of named parameters which are actually Node Lists // -a list of anonymous parameters, a Node list again type Node struct { Typ nodeType Val string NamedParams map[string]Nodes Params []Nodes } // Return the Node text content, without any decoration func (n *Node) StringRepresentation() string { glog.V(7).Infof("stringRepresentation for %+v", n) switch n.Typ { case NodeText, NodeInvalid: return n.Val case NodeLink, NodeELink: if len(n.Params) > 0 { return n.Params[0].StringRepresentation() } else { return n.StringParamOrEmpty("link") } case NodeTemplate: if len(n.Params) > 0 { return n.Params[0].StringRepresentation() } else { return "" } default: return "" } } func (n *Node) String() string { switch n.Typ { case NodeText, NodeInvalid: return fmt.Sprintf("%q", n.Val) } o := fmt.Sprintf("%s: %s", n.Typ.String(), n.Val) for ix, p := range n.Params { o += fmt.Sprintf("\t%d: %s\n", ix, p.String()) } for k, v := range n.NamedParams { o += fmt.Sprintf("\t%s: %s\n", k, v.String()) } return o } // StringParam returns the string value of a given named parameter func (n *Node) StringParam(k string) string { param, ok := n.NamedParams[k] if !ok { glog.V(2).Infof("Unable to extract parameter \"%s\" for node %s", k, n.String()) } else { return param.StringRepresentation() } return "" } func (n *Node) StringParamOrEmpty(k string) string { glog.V(2).Infof("StringParamOrEmpty for %s", k) v, ok := n.NamedParams[k] if ok { ret := v.StringRepresentation() return strings.Trim(ret, " \n") }<|fim▁hole|> return Node{Typ: NodeEmpty} } type nodeType int const ( NodeInvalid = nodeType(iota) NodeText NodeTitle NodeLink NodeELink NodeTemplate NodePlaceholder NodeEq NodeUnknown NodeEmpty ) func (n nodeType) String() string { switch n { case NodeText: return "Text" case NodeLink: return "Link" case NodeELink: return "ELink" case NodeTemplate: return "Template" case NodeEq: return " EQ " case NodeTitle: return " Title " case NodePlaceholder: return " Placeholder " case NodeUnknown: return "UNK" case NodeInvalid: return " INV " default: return "????" } }<|fim▁end|>
return "" } func EmptyNode() Node {
<|file_name|>size_of.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use script::test::size_of; // Macro so that we can stringify type names // I'd really prefer the tests themselves to be run at plugin time, // however rustc::middle doesn't have access to the full type data macro_rules! sizeof_checker ( ($testname: ident, $t: ident, $known_size: expr) => ( #[test] fn $testname() { let new = size_of::$t(); let old = $known_size; if new < old { panic!("Your changes have decreased the stack size of commonly used DOM struct {} from {} to {}. \ Good work! Please update the size in tests/unit/script/size_of.rs.", stringify!($t), old, new) } else if new > old { panic!("Your changes have increased the stack size of commonly used DOM struct {} from {} to {}. \ These structs are present in large quantities in the DOM, and increasing the size \ may dramatically affect our memory footprint. Please consider choosing a design which \ avoids this increase. If you feel that the increase is necessary, \ update to the new size in tests/unit/script/size_of.rs.", stringify!($t), old, new) } }); ); // Update the sizes here sizeof_checker!(size_event_target, EventTarget, 48);<|fim▁hole|>sizeof_checker!(size_div, HTMLDivElement, 376); sizeof_checker!(size_span, HTMLSpanElement, 376); sizeof_checker!(size_text, Text, 216); sizeof_checker!(size_characterdata, CharacterData, 216);<|fim▁end|>
sizeof_checker!(size_node, Node, 184); sizeof_checker!(size_element, Element, 360); sizeof_checker!(size_htmlelement, HTMLElement, 376);
<|file_name|>index.stories.tsx<|end_file_name|><|fim▁begin|>/* import * as React from 'react' import SelectOtherDevice from '.' import {action, storiesOf} from '../../stories/storybook' import * as Constants from '../../constants/provision' import * as Types from '../../constants/types/provision' const rd = { cTime: 0, encryptKey: '', lastUsedTime: 0, mTime: 0, status: 0, verifyKey: '', } const props = { devices: [ Constants.rpcDeviceToDevice({ ...rd, deviceID: '1', deviceNumberOfType: 1, name: 'iphone', type: 'mobile', }), Constants.rpcDeviceToDevice({ ...rd, deviceID: '2', deviceNumberOfType: 2, name: 'Home Computer', type: 'desktop', }), Constants.rpcDeviceToDevice({ ...rd, deviceID: '3', deviceNumberOfType: 3, name: 'Android Nexus 5x', type: 'mobile', }), Constants.rpcDeviceToDevice({ ...rd, deviceID: '4', deviceNumberOfType: 4, name: 'Tuba Contest', type: 'backup',<|fim▁hole|> onSelect: action('onSelect'), } const tonsOfDevices: Array<Types.Device> = [] for (var i = 0; i < 100; ++i) { let type: string switch (i % 3) { case 0: type = 'desktop' break case 1: type = 'mobile' break default: type = 'backup' break } tonsOfDevices.push( Constants.rpcDeviceToDevice({ ...rd, deviceID: String(i + 1), deviceNumberOfType: i, name: 'name: ' + String(i), type, }) ) } const load = () => { storiesOf('Provision/SelectOtherDevice', module) .add('Normal', () => <SelectOtherDevice {...props} />) .add('Tons', () => <SelectOtherDevice {...props} devices={tonsOfDevices} />) } export default load */ export default {}<|fim▁end|>
}), ], onBack: action('onBack'), onResetAccount: action('onResetAccount'),
<|file_name|>server.go<|end_file_name|><|fim▁begin|>package main import ( "fmt" "log" "net/http" "gopkg.in/jcelliott/turnpike.v1" ) func main() { s := turnpike.NewServer() http.Handle("/ws", s.Handler) http.Handle("/", http.FileServer(http.Dir("web")))<|fim▁hole|> if err := http.ListenAndServe(":8080", nil); err != nil { log.Fatal("ListenAndServe:", err) } }<|fim▁end|>
fmt.Println("Listening on port 8080")
<|file_name|>forms.py<|end_file_name|><|fim▁begin|>from django import forms <|fim▁hole|># future use<|fim▁end|>
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa # Crypto and Cryptodome have same API if random(): from Crypto.PublicKey import DSA from Crypto.PublicKey import RSA else: from Cryptodome.PublicKey import DSA from Cryptodome.PublicKey import RSA RSA_WEAK = 1024 RSA_OK = 2048 RSA_STRONG = 3076 DSA_WEAK = 1024 DSA_OK = 2048 DSA_STRONG = 3076 BIG = 10000 EC_WEAK = ec.SECT163K1() # has key size of 163 EC_OK = ec.SECP224R1() EC_STRONG = ec.SECP384R1() EC_BIG = ec.SECT571R1() dsa_gen_key = dsa.generate_private_key ec_gen_key = ec.generate_private_key rsa_gen_key = rsa.generate_private_key # Strong and OK keys. dsa_gen_key(key_size=DSA_OK) dsa_gen_key(key_size=DSA_STRONG) dsa_gen_key(key_size=BIG) ec_gen_key(curve=EC_OK) ec_gen_key(curve=EC_STRONG) ec_gen_key(curve=EC_BIG) rsa_gen_key(public_exponent=65537, key_size=RSA_OK) rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG) rsa_gen_key(public_exponent=65537, key_size=BIG) DSA.generate(bits=RSA_OK) DSA.generate(bits=RSA_STRONG) RSA.generate(bits=RSA_OK) RSA.generate(bits=RSA_STRONG) dsa_gen_key(DSA_OK) dsa_gen_key(DSA_STRONG) dsa_gen_key(BIG) ec_gen_key(EC_OK) ec_gen_key(EC_STRONG) ec_gen_key(EC_BIG) rsa_gen_key(65537, RSA_OK) rsa_gen_key(65537, RSA_STRONG) rsa_gen_key(65537, BIG) DSA.generate(DSA_OK) DSA.generate(DSA_STRONG) RSA.generate(RSA_OK) RSA.generate(RSA_STRONG) # Weak keys dsa_gen_key(DSA_WEAK) ec_gen_key(EC_WEAK) rsa_gen_key(65537, RSA_WEAK) dsa_gen_key(key_size=DSA_WEAK) ec_gen_key(curve=EC_WEAK) rsa_gen_key(65537, key_size=RSA_WEAK) DSA.generate(DSA_WEAK) RSA.generate(RSA_WEAK) # ------------------------------------------------------------------------------ # Through function calls def make_new_rsa_key_weak(bits): return RSA.generate(bits) # NOT OK make_new_rsa_key_weak(RSA_WEAK) def make_new_rsa_key_strong(bits): return RSA.generate(bits) # OK<|fim▁hole|>make_new_rsa_key_strong(RSA_STRONG) def only_used_by_test(bits): # Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it. return RSA.generate(bits)<|fim▁end|>
<|file_name|>_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from msrest import Serializer from .. import models as _models from .._vendor import _convert_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False<|fim▁hole|>def build_list_request( **kwargs: Any ) -> HttpRequest: api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/providers/Microsoft.Compute/operations') # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) class Operations(object): """Operations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.compute.v2021_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list( self, **kwargs: Any ) -> Iterable["_models.ComputeOperationListResult"]: """Gets a list of compute operations. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ComputeOperationListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.ComputeOperationListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ComputeOperationListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("ComputeOperationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/providers/Microsoft.Compute/operations'} # type: ignore<|fim▁end|>
<|file_name|>common.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ This is the common settings file, intended to set sane defaults. If you have a piece of configuration that's dependent on a set of feature flags being set, then create a function that returns the calculated value based on the value of FEATURES[...]. Modules that extend this one can change the feature configuration in an environment specific config file and re-calculate those values. We should make a method that calls all these config methods so that you just make one call at the end of your site-specific dev file to reset all the dependent variables (like INSTALLED_APPS) for you. Longer TODO: 1. Right now our treatment of static content in general and in particular course-specific static content is haphazard. 2. We should have a more disciplined approach to feature flagging, even if it just means that we stick them in a dict called FEATURES. 3. We need to handle configuration for multiple courses. This could be as multiple sites, but we do need a way to map their data assets. """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=W0401, W0611, W0614, C0103 import sys import os import imp from path import path from warnings import simplefilter from django.utils.translation import ugettext_lazy as _ from .discussionsettings import * from xmodule.modulestore.modulestore_settings import update_module_store_settings from lms.lib.xblock.mixin import LmsBlockMixin ################################### FEATURES ################################### # The display name of the platform to be used in templates/emails/etc. PLATFORM_NAME = "Your Platform Name Here" CC_MERCHANT_NAME = PLATFORM_NAME PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount" PLATFORM_TWITTER_ACCOUNT = "@YourPlatformTwitterAccount" PLATFORM_TWITTER_URL = "https://twitter.com/YourPlatformTwitterAccount" PLATFORM_MEETUP_URL = "http://www.meetup.com/YourMeetup" PLATFORM_LINKEDIN_URL = "http://www.linkedin.com/company/YourPlatform" PLATFORM_GOOGLE_PLUS_URL = "https://plus.google.com/YourGooglePlusAccount/" COURSEWARE_ENABLED = True SCHOOL_ENABLED =True ENABLE_JASMINE = False DISCUSSION_SETTINGS = { 'MAX_COMMENT_DEPTH': 2, } # Features FEATURES = { 'SAMPLE': False, 'USE_DJANGO_PIPELINE': True, 'DISPLAY_DEBUG_INFO_TO_STAFF': True, 'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff. 'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails 'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose ## DO NOT SET TO True IN THIS FILE ## Doing so will cause all courses to be released on production 'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date # When True, will only publicly list courses by the subdomain. 
Expects you # to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of # course_ids (see dev_int.py for an example) 'SUBDOMAIN_COURSE_LISTINGS': False, # When True, will override certain branding with university specific values # Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the # university to use for branding purposes 'SUBDOMAIN_BRANDING': False, 'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST # set to None to do no university selection # for consistency in user-experience, keep the value of the following 3 settings # in sync with the corresponding ones in cms/envs/common.py 'ENABLE_DISCUSSION_SERVICE': True, 'ENABLE_TEXTBOOK': True, 'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI. # discussion home panel, which includes a subscription on/off setting for discussion digest emails. # this should remain off in production until digest notifications are online. 'ENABLE_DISCUSSION_HOME_PANEL': False, 'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard) 'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops) 'ENABLE_SQL_TRACKING_LOGS': False, 'ENABLE_LMS_MIGRATION': False, 'ENABLE_MANUAL_GIT_RELOAD': False, 'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware 'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses 'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL # extrernal access methods 'ACCESS_REQUIRE_STAFF_FOR_COURSE': False, 'AUTH_USE_OPENID': False, 'AUTH_USE_CERTIFICATES': False, 'AUTH_USE_OPENID_PROVIDER': False, # Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled # in LMS 'AUTH_USE_SHIB': False, 'AUTH_USE_CAS': False, # This flag disables the requirement of having to agree to the TOS for users registering # with Shib. Feature was requested by Stanford's office of general counsel 'SHIB_DISABLE_TOS': False, # Toggles OAuth2 authentication provider 'ENABLE_OAUTH2_PROVIDER': False, # Can be turned off if course lists need to be hidden. Effects views and templates. 'COURSES_ARE_BROWSABLE': True, # Enables ability to restrict enrollment in specific courses by the user account login method 'RESTRICT_ENROLL_BY_REG_METHOD': False, # Enables the LMS bulk email feature for course staff 'ENABLE_INSTRUCTOR_EMAIL': True, # If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on # for each course via django-admin interface. # If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default # for all Mongo-backed courses. 'REQUIRE_COURSE_EMAIL_AUTH': True, # Analytics experiments - shows instructor analytics tab in LMS instructor dashboard. # Enabling this feature depends on installation of a separate analytics server. 'ENABLE_INSTRUCTOR_ANALYTICS': False, # enable analytics server. # WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL # LMS OPERATION. See analytics.py for details about what # this does. 'RUN_AS_ANALYTICS_SERVER_ENABLED': False, # Flip to True when the YouTube iframe API breaks (again) 'USE_YOUTUBE_OBJECT_API': False, # Give a UI to show a student's submission history in a problem by the # Staff Debug tool. 'ENABLE_STUDENT_HISTORY_VIEW': True, # Segment.io for LMS--need to explicitly turn it on for production. 
'SEGMENT_IO_LMS': False, # Provide a UI to allow users to submit feedback from the LMS (left-hand help modal) 'ENABLE_FEEDBACK_SUBMISSION': False, # Turn on a page that lets staff enter Python code to be run in the # sandbox, for testing whether it's enabled properly. 'ENABLE_DEBUG_RUN_PYTHON': False, # Enable URL that shows information about the status of various services 'ENABLE_SERVICE_STATUS': False, # Toggle to indicate use of a custom theme 'USE_CUSTOM_THEME': False, # Don't autoplay videos for students 'AUTOPLAY_VIDEOS': False, # Enable instructor dash to submit background tasks 'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True, # Enable instructor to assign individual due dates 'INDIVIDUAL_DUE_DATES': False, # Enable legacy instructor dashboard 'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True, # Is this an edX-owned domain? (used on instructor dashboard) 'IS_EDX_DOMAIN': False, # Toggle to enable certificates of courses on dashboard 'ENABLE_VERIFIED_CERTIFICATES': False, # Allow use of the hint management instructor view. 'ENABLE_HINTER_INSTRUCTOR_VIEW': False, # for load testing 'AUTOMATIC_AUTH_FOR_TESTING': False, # Toggle to enable chat availability (configured on a per-course # basis in Studio) 'ENABLE_CHAT': False, # Allow users to enroll with methods other than just honor code certificates 'MULTIPLE_ENROLLMENT_ROLES': False, # Toggle the availability of the shopping cart page 'ENABLE_SHOPPING_CART': False, # Toggle storing detailed billing information 'STORE_BILLING_INFO': False, # Enable flow for payments for course registration (DIFFERENT from verified student flow) 'ENABLE_PAID_COURSE_REGISTRATION': False, # Automatically approve student identity verification attempts 'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False, # Disable instructor dash buttons for downloading course data # when enrollment exceeds this number 'MAX_ENROLLMENT_INSTR_BUTTONS': 200, # Grade calculation started from the new instructor dashboard will write # grades CSV files to S3 and give links for downloads. 'ENABLE_S3_GRADE_DOWNLOADS': False, # whether to use password policy enforcement or not 'ENFORCE_PASSWORD_POLICY': False, # Give course staff unrestricted access to grade downloads (if set to False, # only edX superusers can perform the downloads) 'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False, 'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"], # Turn off account locking if failed login attempts exceed a limit 'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False, # Hide any Personally Identifiable Information from application logs 'SQUELCH_PII_IN_LOGS': False, # Toggles the embargo functionality, which enables embargoing for particular courses 'EMBARGO': False, # Toggles the embargo site functionality, which enables embargoing for the whole site 'SITE_EMBARGOED': False, # Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means # that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the # defaults, so that we maintain current behavior 'ALLOW_WIKI_ROOT_ACCESS': True, # Turn on/off Microsites feature 'USE_MICROSITES': False, # Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb # if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False, # Toggle to enable alternate urls for marketing links 'ENABLE_MKTG_SITE': False, # Prevent concurrent logins per user 'PREVENT_CONCURRENT_LOGINS': False, # Turn off Advanced Security by default 'ADVANCED_SECURITY': False, # Show a "Download your certificate" on the Progress page if the lowest # nonzero grade cutoff is met 'SHOW_PROGRESS_SUCCESS_BUTTON': False, # When a logged in user goes to the homepage ('/') should the user be # redirected to the dashboard - this is default Open edX behavior. Set to # False to not redirect the user 'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True, # Expose Mobile REST API. Note that if you use this, you must also set # ENABLE_OAUTH2_PROVIDER to True 'ENABLE_MOBILE_REST_API': False, # Enable the new dashboard, account, and profile pages 'ENABLE_NEW_DASHBOARD': False, # Enable the combined login/registration form 'ENABLE_COMBINED_LOGIN_REGISTRATION': False, # Show a section in the membership tab of the instructor dashboard # to allow an upload of a CSV file that contains a list of new accounts to create # and register for course. 'ALLOW_AUTOMATED_SIGNUPS': False, # Display demographic data on the analytics tab in the instructor dashboard. 'DISPLAY_ANALYTICS_DEMOGRAPHICS': True, # Enable display of enrollment counts in instructor and legacy analytics dashboard 'DISPLAY_ANALYTICS_ENROLLMENTS': True, } # Ignore static asset files on import which match this pattern ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)" # Used for A/B testing DEFAULT_GROUPS = [] # If this is true, random scores will be generated for the purpose of debugging the profile graphs GENERATE_PROFILE_SCORES = False # Used with XQueue XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds ############################# SET PATH INFORMATION ############################# PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms REPO_ROOT = PROJECT_ROOT.dirname() COMMON_ROOT = REPO_ROOT / "common" ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in COURSES_ROOT = ENV_ROOT / "data" DATA_DIR = COURSES_ROOT # TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py sys.path.append(REPO_ROOT) sys.path.append(PROJECT_ROOT / 'djangoapps') sys.path.append(COMMON_ROOT / 'djangoapps') sys.path.append(COMMON_ROOT / 'lib') # For Node.js system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules') node_paths = [ COMMON_ROOT / "static/js/vendor", COMMON_ROOT / "static/coffee/src", system_node_path, ] NODE_PATH = ':'.join(node_paths) # For geolocation ip database GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat" GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat" # Where to look for a status message STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json" ############################ OpenID Provider ################################## OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net'] ############################ OAUTH2 Provider ################################### # OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2' # OpenID Connect claim handlers OAUTH_OIDC_ID_TOKEN_HANDLERS = ( 'oauth2_provider.oidc.handlers.BasicIDTokenHandler', 'oauth2_provider.oidc.handlers.ProfileHandler', 'oauth2_provider.oidc.handlers.EmailHandler', 'oauth2_handler.IDTokenHandler' ) OAUTH_OIDC_USERINFO_HANDLERS = ( 'oauth2_provider.oidc.handlers.BasicUserInfoHandler', 'oauth2_provider.oidc.handlers.ProfileHandler', 'oauth2_provider.oidc.handlers.EmailHandler', 'oauth2_handler.UserInfoHandler' ) ################################## EDX WEB ##################################### # This is where we stick our compiled template files. Most of the app uses Mako # templates import tempfile MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms') MAKO_TEMPLATES = {} MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates', COMMON_ROOT / 'templates', COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates', COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates'] # This is where Django Template lookup is defined. There are a few of these # still left lying around. TEMPLATE_DIRS = [ PROJECT_ROOT / "templates", COMMON_ROOT / 'templates', COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates', COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates', ] TEMPLATE_CONTEXT_PROCESSORS = ( 'django.core.context_processors.request', 'django.core.context_processors.static', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.i18n', 'django.contrib.auth.context_processors.auth', # this is required for admin 'django.core.context_processors.csrf', # Added for django-wiki 'django.core.context_processors.media', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'sekizai.context_processors.sekizai', # Hack to get required link URLs to password reset templates 'edxmako.shortcuts.marketing_link_context_processor', # Allows the open edX footer to be leveraged in Django Templates. 'edxmako.shortcuts.open_source_footer_context_processor', # Shoppingcart processor (detects if request.user has a cart) 'shoppingcart.context_processor.user_has_cart_context_processor', # Allows the open edX footer to be leveraged in Django Templates. 'edxmako.shortcuts.microsite_footer_context_processor', ) # use the ratelimit backend to prevent brute force attacks AUTHENTICATION_BACKENDS = ( 'ratelimitbackend.backends.RateLimitModelBackend', ) STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB MAX_FILEUPLOADS_PER_INPUT = 20 # FIXME: # We should have separate S3 staged URLs in case we need to make changes to # these assets and test them. LIB_URL = '/static/js/' # Dev machines shouldn't need the book # BOOK_URL = '/static/book/' BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys RSS_TIMEOUT = 600 # Configuration option for when we want to grab server error pages STATIC_GRAB = False DEV_CONTENT = True EDX_ROOT_URL = '' LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login' LOGIN_URL = EDX_ROOT_URL + '/accounts/login' COURSE_NAME = "6.002_Spring_2012" COURSE_NUMBER = "6.002x" COURSE_TITLE = "Circuits and Electronics" ### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome) WIKI_ENABLED = False ### COURSE_DEFAULT = '6.002x_Fall_2012' COURSE_SETTINGS = { '6.002x_Fall_2012': { 'number': '6.002x', 'title': 'Circuits and Electronics', 'xmlpath': '6002x/', 'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012', } } # IP addresses that are allowed to reload the course, etc. # TODO (vshnayder): Will probably need to change as we get real access control in. LMS_MIGRATION_ALLOWED_IPS = [] # These are standard regexes for pulling out info like course_ids, usage_ids, etc. # They are used so that URLs with deprecated-format strings still work. # Note: these intentionally greedily grab all chars up to the next slash including any pluses # DHM: I really wanted to ensure the separators were the same (+ or /) but all patts I tried had # too many inadvertent side effects :-( COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/]+)' COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id') COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':') USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))' ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))' USAGE_ID_PATTERN = r'(?P<usage_id>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))' COURSE_KIND_PATTERN = r'(?P<course_kind>(?:[^/]+)|(?:[^/]+))' SCHOOL_NAME_PATTERN = r'(?P<school_key_string>(?:[^/]+)|(?:[^/]+))' ############################## EVENT TRACKING ################################# # FIXME: Should we be doing this truncation? TRACK_MAX_EVENT = 50000 DEBUG_TRACK_LOG = False TRACKING_BACKENDS = { 'logger': { 'ENGINE': 'track.backends.logger.LoggerBackend', 'OPTIONS': { 'name': 'tracking' } } } # We're already logging events, and we don't want to capture user # names/passwords. Heartbeat events are likely not interesting. TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event'] EVENT_TRACKING_ENABLED = True EVENT_TRACKING_BACKENDS = { 'logger': { 'ENGINE': 'eventtracking.backends.logger.LoggerBackend', 'OPTIONS': { 'name': 'tracking', 'max_event_size': TRACK_MAX_EVENT, } } } EVENT_TRACKING_PROCESSORS = [ { 'ENGINE': 'track.shim.LegacyFieldMappingProcessor' }, { 'ENGINE': 'track.shim.VideoEventProcessor' } ] # Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag. # In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'): TRACKING_BACKENDS.update({ 'sql': { 'ENGINE': 'track.backends.django.DjangoBackend' } }) EVENT_TRACKING_BACKENDS.update({ 'sql': { 'ENGINE': 'track.backends.django.DjangoBackend' } }) TRACKING_SEGMENTIO_WEBHOOK_SECRET = None TRACKING_SEGMENTIO_ALLOWED_TYPES = ['track'] TRACKING_SEGMENTIO_SOURCE_MAP = { 'analytics-android': 'mobile', 'analytics-ios': 'mobile', } ######################## GOOGLE ANALYTICS ########################### GOOGLE_ANALYTICS_ACCOUNT = None GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY' ######################## OPTIMIZELY ########################### OPTIMIZELY_PROJECT_ID = None ######################## subdomain specific settings ########################### COURSE_LISTINGS = {} SUBDOMAIN_BRANDING = {} VIRTUAL_UNIVERSITIES = [] ############# XBlock Configuration ########## # Import after sys.path fixup from xmodule.modulestore.inheritance import InheritanceMixin from xmodule.modulestore import prefer_xmodules from xmodule.x_module import XModuleMixin # This should be moved into an XBlock Runtime/Application object # once the responsibility of XBlock creation is moved out of modulestore - cpennington XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin) # Allow any XBlock in the LMS XBLOCK_SELECT_FUNCTION = prefer_xmodules ############# ModuleStore Configuration ########## MODULESTORE_BRANCH = 'published-only' CONTENTSTORE = None DOC_STORE_CONFIG = { 'host': 'localhost', 'db': 'xmodule', 'collection': 'modulestore', # If 'asset_collection' defined, it'll be used # as the collection name for asset metadata. # Otherwise, a default collection name will be used. } MODULESTORE = { 'default': { 'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore', 'OPTIONS': { 'mappings': {}, 'stores': [ { 'NAME': 'draft', 'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore', 'DOC_STORE_CONFIG': DOC_STORE_CONFIG, 'OPTIONS': { 'default_class': 'xmodule.hidden_module.HiddenDescriptor', 'fs_root': DATA_DIR, 'render_template': 'edxmako.shortcuts.render_to_string', } }, { 'NAME': 'xml', 'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore', 'OPTIONS': { 'data_dir': DATA_DIR, 'default_class': 'xmodule.hidden_module.HiddenDescriptor', } }, { 'NAME': 'split', 'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore', 'DOC_STORE_CONFIG': DOC_STORE_CONFIG, 'OPTIONS': { 'default_class': 'xmodule.hidden_module.HiddenDescriptor', 'fs_root': DATA_DIR, 'render_template': 'edxmako.shortcuts.render_to_string', } }, ] } } } #################### Python sandbox ############################################ CODE_JAIL = { # Path to a sandboxed Python executable. None means don't bother. 'python_bin': None, # User to run as in the sandbox. 'user': 'sandbox', # Configurable limits. 'limits': { # How many CPU seconds can jailed code use? 'CPU': 1, }, } # Some courses are allowed to run unsafe code. This is a list of regexes, one # of them must match the course id for that course to run unsafe code.
# # For example: # # COURSES_WITH_UNSAFE_CODE = [ # r"Harvard/XY123.1/.*" # ] COURSES_WITH_UNSAFE_CODE = [] ############################### DJANGO BUILT-INS ############################### # Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here DEBUG = False TEMPLATE_DEBUG = False USE_TZ = True SESSION_COOKIE_SECURE = False # CMS base CMS_BASE = 'localhost:8001' # Site info SITE_ID = 1 SITE_NAME = "example.com" HTTPS = 'on' ROOT_URLCONF = 'lms.urls' # NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*' # Platform Email EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DEFAULT_FROM_EMAIL = '[email protected]' DEFAULT_FEEDBACK_EMAIL = '[email protected]' SERVER_EMAIL = '[email protected]' TECH_SUPPORT_EMAIL = '[email protected]' CONTACT_EMAIL = '[email protected]' BUGS_EMAIL = '[email protected]' UNIVERSITY_EMAIL = '[email protected]' PRESS_EMAIL = '[email protected]' ADMINS = () MANAGERS = ADMINS # Static content STATIC_URL = '/static/' STATIC_ROOT = ENV_ROOT / "staticfiles" STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", ] FAVICON_PATH = 'images/favicon.ico' # Locale/Internationalization TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html # these languages display right to left LANGUAGES_BIDI = ("en@rtl", "he", "ar", "fa", "ur", "fa-ir") # Sourced from http://www.localeplanet.com/icu/ and wikipedia LANGUAGES = ( ('en', u'English'), ('en@rtl', u'English (right-to-left)'), ('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing ('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod) ('am', u'አማርኛ'), # Amharic ('ar', u'العربية'), # Arabic ('az', u'azərbaycanca'), # Azerbaijani ('bg-bg', u'български (България)'), # Bulgarian (Bulgaria) ('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh) ('bn-in', u'বাংলা (ভারত)'), # Bengali (India) ('bs', u'bosanski'), # Bosnian ('ca', u'Català'), # Catalan ('ca@valencia', u'Català (València)'), # Catalan (Valencia) ('cs', u'Čeština'), # Czech ('cy', u'Cymraeg'), # Welsh ('da', u'dansk'), # Danish ('de-de', u'Deutsch (Deutschland)'), # German (Germany) ('el', u'Ελληνικά'), # Greek ('en-uk', u'English (United Kingdom)'), # English (United Kingdom) ('en@lolcat', u'LOLCAT English'), # LOLCAT English ('en@pirate', u'Pirate English'), # Pirate English ('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America) ('es-ar', u'Español (Argentina)'), # Spanish (Argentina) ('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador) ('es-es', u'Español (España)'), # Spanish (Spain) ('es-mx', u'Español (México)'), # Spanish (Mexico) ('es-pe', u'Español (Perú)'), # Spanish (Peru) ('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia) ('eu-es', u'euskara (Espainia)'), # Basque (Spain) ('fa', u'فارسی'), # Persian ('fa-ir', u'فارسی (ایران)'), # Persian (Iran) ('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland) ('fil', u'Filipino'), # Filipino ('fr', u'Français'), # French ('gl', u'Galego'), # Galician ('gu', u'ગુજરાતી'), # Gujarati ('he', u'עברית'), # Hebrew ('hi', u'हिन्दी'), # Hindi ('hr', u'hrvatski'), # Croatian ('hu', u'magyar'), # Hungarian ('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia) ('id', u'Bahasa Indonesia'), # Indonesian ('it-it', u'Italiano (Italia)'), # Italian (Italy) ('ja-jp', u'日本語 (日本)'), # Japanese (Japan) ('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan) ('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), #
Khmer (Cambodia) ('kn', u'ಕನ್ನಡ'), # Kannada ('ko-kr', u'한국어 (대한민국)'), # Korean (Korea) ('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania) ('ml', u'മലയാളം'), # Malayalam ('mn', u'Монгол хэл'), # Mongolian ('mr', u'मराठी'), # Marathi ('ms', u'Bahasa Melayu'), # Malay ('nb', u'Norsk bokmål'), # Norwegian Bokmål ('ne', u'नेपाली'), # Nepali ('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands) ('or', u'ଓଡ଼ିଆ'), # Oriya ('pl', u'Polski'), # Polish ('pt-br', u'Português (Brasil)'), # Portuguese (Brazil) ('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal) ('ro', u'română'), # Romanian ('ru', u'Русский'), # Russian ('si', u'සිංහල'), # Sinhala ('sk', u'Slovenčina'), # Slovak ('sl', u'Slovenščina'), # Slovenian ('sq', u'shqip'), # Albanian ('sr', u'Српски'), # Serbian ('sv', u'svenska'), # Swedish ('sw', u'Kiswahili'), # Swahili ('ta', u'தமிழ்'), # Tamil ('te', u'తెలుగు'), # Telugu ('th', u'ไทย'), # Thai ('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey) ('uk', u'Українська'), # Ukrainian ('ur', u'اردو'), # Urdu ('vi', u'Tiếng Việt'), # Vietnamese ('uz', u'Ўзбек'), # Uzbek ('zh-cn', u'中文 (简体)'), # Chinese (China) ('zh-hk', u'中文 (香港)'), # Chinese (Hong Kong) ('zh-tw', u'中文 (台灣)'), # Chinese (Taiwan) ) LANGUAGE_DICT = dict(LANGUAGES) USE_I18N = True USE_L10N = True # Localization strings (e.g. django.po) are under this directory LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/ # Messages MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' # Guidelines for translators TRANSLATORS_GUIDE = 'https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst' #################################### GITHUB ####################################### # gitreload is used in LMS-workflow to pull content from github # gitreload requests are only allowed from these IP addresses, which are # the advertised public IPs of the github WebHook servers. # These are listed, eg at https://github.com/edx/edx-platform/admin/hooks ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178'] #################################### AWS ####################################### # S3BotoStorage insists on a timeout for uploaded assets. We should make it # permanent instead, but rather than trying to figure out exactly where that # setting is, I'm just bumping the expiration time to something absurd (100 # years).
This is only used if DEFAULT_FILE_STORAGE is overridden to use S3 # in the global settings.py AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years ################################# SIMPLEWIKI ################################### SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False ################################# WIKI ################################### from course_wiki import settings as course_wiki_settings WIKI_ACCOUNT_HANDLING = False WIKI_EDITOR = 'course_wiki.editors.CodeMirror' WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False WIKI_LINK_LIVE_LOOKUPS = False WIKI_LINK_DEFAULT_LEVEL = 2 ##### Feedback submission mechanism ##### FEEDBACK_SUBMISSION_EMAIL = None ##### Zendesk ##### ZENDESK_URL = None ZENDESK_USER = None ZENDESK_API_KEY = None ##### EMBARGO ##### EMBARGO_SITE_REDIRECT_URL = None ##### shoppingcart Payment ##### PAYMENT_SUPPORT_EMAIL = '[email protected]' ##### Using cybersource by default ##### CC_PROCESSOR_NAME = 'CyberSource' CC_PROCESSOR = { 'CyberSource': { 'SHARED_SECRET': '', 'MERCHANT_ID': '', 'SERIAL_NUMBER': '', 'ORDERPAGE_VERSION': '7', 'PURCHASE_ENDPOINT': '', }, 'CyberSource2': { "PURCHASE_ENDPOINT": '', "SECRET_KEY": '', "ACCESS_KEY": '', "PROFILE_ID": '', } } # Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$'] # Members of this group are allowed to generate payment reports PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access' ################################# open ended grading config ##################### # By setting up the default settings with an incorrect user name and password, # we will get an error when attempting to connect OPEN_ENDED_GRADING_INTERFACE = { 'url': 'http://example.com/peer_grading', 'username': 'incorrect_user', 'password': 'incorrect_pass', 'staff_grading': 'staff_grading', 'peer_grading': 'peer_grading', 'grading_controller': 'grading_controller' } # Used for testing, debugging peer grading MOCK_PEER_GRADING = False # Used for testing, debugging staff grading MOCK_STAFF_GRADING = False ################################# Jasmine ################################## JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee' ################################# Deprecation warnings ##################### # Ignore deprecation warnings (so we don't clutter Jenkins builds/production) simplefilter('ignore') ################################# Middleware ################################### # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'staticfiles.finders.FileSystemFinder', 'staticfiles.finders.AppDirectoriesFinder', 'pipeline.finders.PipelineFinder', ) # List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = ( 'edxmako.makoloader.MakoFilesystemLoader', 'edxmako.makoloader.MakoAppDirectoriesLoader', # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'request_cache.middleware.RequestCache', 'microsite_configuration.middleware.MicrositeMiddleware', 'django_comment_client.middleware.AjaxExceptionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', # Instead of AuthenticationMiddleware, we use a cache-backed version #'django.contrib.auth.middleware.AuthenticationMiddleware', 'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware', 'student.middleware.UserStandingMiddleware', 'contentserver.middleware.StaticContentServer', 'crum.CurrentRequestUserMiddleware', # Adds user tags to tracking events # Must go before TrackMiddleware, to get the context set up 'user_api.middleware.UserTagsEventContextMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'track.middleware.TrackMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'splash.middleware.SplashMiddleware', # Allows us to dark-launch particular languages 'dark_lang.middleware.DarkLangMiddleware', 'geoinfo.middleware.CountryMiddleware', 'embargo.middleware.EmbargoMiddleware', # Allows us to set user preferences # should be after DarkLangMiddleware 'lang_pref.middleware.LanguagePreferenceMiddleware', # Detects user-requested locale from 'accept-language' header in http request 'django.middleware.locale.LocaleMiddleware', 'django.middleware.transaction.TransactionMiddleware', # 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django_comment_client.utils.ViewNameMiddleware', 'codejail.django_integration.ConfigureCodeJailMiddleware', # catches any uncaught RateLimitExceptions and returns a 403 instead of a 500 'ratelimitbackend.middleware.RateLimitMiddleware', # needs to run after locale middleware (or anything that modifies the request context) 'edxmako.middleware.MakoMiddleware', # for expiring inactive sessions 'session_inactivity_timeout.middleware.SessionInactivityTimeout', # use Django built in clickjacking protection 'django.middleware.clickjacking.XFrameOptionsMiddleware', # to redirect unenrolled students to the course info page 'courseware.middleware.RedirectUnenrolledMiddleware', 'course_wiki.middleware.WikiAccessMiddleware', ) # Clickjacking protection can be enabled by setting this to 'DENY' X_FRAME_OPTIONS = 'ALLOW' ############################### Pipeline ####################################### STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage' from rooted_paths import rooted_glob courseware_js = ( [ 'coffee/src/' + pth + '.js' for pth in ['courseware', 'histogram', 'navigation', 'time'] ] + ['js/' + pth + '.js' for pth in ['ajax-error']] + sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js')) ) # Before a student accesses courseware, we do not # need many of the JS dependencies. This includes # only the dependencies used everywhere in the LMS # (including the dashboard/account/profile pages) # Currently, this partially duplicates the "main vendor" # JavaScript file, so only one of the two should be included # on a page at any time. # In the future, we will likely refactor this to use # RequireJS and an optimizer.
base_vendor_js = [ 'js/vendor/jquery.min.js', 'js/vendor/jquery.cookie.js', 'js/vendor/underscore-min.js' ] main_vendor_js = base_vendor_js + [ 'js/vendor/require.js', 'js/RequireJS-namespace-undefine.js', 'js/vendor/json2.js', 'js/vendor/jquery-ui.min.js', 'js/vendor/jquery.qtip.min.js', 'js/vendor/swfobject/swfobject.js', 'js/vendor/jquery.ba-bbq.min.js', 'js/vendor/ova/annotator-full.js', 'js/vendor/ova/annotator-full-firebase-auth.js', 'js/vendor/ova/video.dev.js', 'js/vendor/ova/vjs.youtube.js', 'js/vendor/ova/rangeslider.js', 'js/vendor/ova/share-annotator.js', 'js/vendor/ova/richText-annotator.js', 'js/vendor/ova/reply-annotator.js', 'js/vendor/ova/tags-annotator.js', 'js/vendor/ova/flagging-annotator.js', 'js/vendor/ova/diacritic-annotator.js', 'js/vendor/ova/grouping-annotator.js', 'js/vendor/ova/jquery-Watch.js', 'js/vendor/ova/openseadragon.js', 'js/vendor/ova/OpenSeaDragonAnnotation.js', 'js/vendor/ova/ova.js', 'js/vendor/ova/catch/js/catch.js', 'js/vendor/ova/catch/js/handlebars-1.1.2.js', 'js/vendor/URI.min.js', ] dashboard_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js')) discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js')) rwd_header_footer_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/common_helpers/rwd_header_footer.js')) staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js')) open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js')) notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js')) instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js')) # JavaScript used by the student account and profile pages # These are not courseware, so they do not need many of the courseware-specific # JavaScript modules.
student_account_js = [ 'js/utils/rwd_header_footer.js', 'js/utils/edx.utils.validate.js', 'js/src/utility.js', 'js/student_account/enrollment.js', 'js/student_account/shoppingcart.js', 'js/student_account/models/LoginModel.js', 'js/student_account/models/RegisterModel.js', 'js/student_account/models/PasswordResetModel.js', 'js/student_account/views/FormView.js', 'js/student_account/views/LoginView.js', 'js/student_account/views/RegisterView.js', 'js/student_account/views/PasswordResetView.js', 'js/student_account/views/AccessView.js', 'js/student_account/accessApp.js', ] student_profile_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_profile/**/*.js')) PIPELINE_CSS = { 'style-vendor': { 'source_filenames': [ 'css/vendor/font-awesome.css', 'css/vendor/jquery.qtip.min.css', 'css/vendor/responsive-carousel/responsive-carousel.css', 'css/vendor/responsive-carousel/responsive-carousel.slide.css', ], 'output_filename': 'css/lms-style-vendor.css', }, 'style-vendor-tinymce-content': { 'source_filenames': [ 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css' ], 'output_filename': 'css/lms-style-vendor-tinymce-content.css', }, 'style-vendor-tinymce-skin': { 'source_filenames': [ 'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css' ], 'output_filename': 'css/lms-style-vendor-tinymce-skin.css', }, 'style-app': { 'source_filenames': [ 'sass/application.css', 'sass/ie.css' ], 'output_filename': 'css/lms-style-app.css', }, 'style-app-extend1': { 'source_filenames': [ 'sass/application-extend1.css', ], 'output_filename': 'css/lms-style-app-extend1.css', }, 'style-app-extend2': { 'source_filenames': [ 'sass/application-extend2.css', ], 'output_filename': 'css/lms-style-app-extend2.css', }, 'style-app-rtl': { 'source_filenames': [ 'sass/application-rtl.css', 'sass/ie-rtl.css' ], 'output_filename': 'css/lms-style-app-rtl.css', }, 'style-app-extend1-rtl': { 'source_filenames': [ 'sass/application-extend1-rtl.css', ], 'output_filename': 'css/lms-style-app-extend1-rtl.css', }, 'style-app-extend2-rtl': { 'source_filenames': [ 'sass/application-extend2-rtl.css', ], 'output_filename': 'css/lms-style-app-extend2-rtl.css', }, 'style-course-vendor': { 'source_filenames': [ 'js/vendor/CodeMirror/codemirror.css', 'css/vendor/jquery.treeview.css', 'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css', ], 'output_filename': 'css/lms-style-course-vendor.css', }, 'style-course': { 'source_filenames': [ 'sass/course.css', 'xmodule/modules.css', ], 'output_filename': 'css/lms-style-course.css', }, 'style-course-rtl': { 'source_filenames': [ 'sass/course-rtl.css', 'xmodule/modules.css', ], 'output_filename': 'css/lms-style-course-rtl.css', }, 'style-xmodule-annotations': { 'source_filenames': [ 'css/vendor/ova/annotator.css', 'css/vendor/ova/edx-annotator.css', 'css/vendor/ova/video-js.min.css', 'css/vendor/ova/rangeslider.css', 'css/vendor/ova/share-annotator.css', 'css/vendor/ova/richText-annotator.css', 'css/vendor/ova/tags-annotator.css', 'css/vendor/ova/flagging-annotator.css', 'css/vendor/ova/diacritic-annotator.css', 'css/vendor/ova/grouping-annotator.css', 'css/vendor/ova/ova.css', 'js/vendor/ova/catch/css/main.css' ], 'output_filename': 'css/lms-style-xmodule-annotations.css', }, } common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js) project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js +
staff_grading_js + open_ended_js + notes_js + instructor_dash_js) PIPELINE_JS = { 'application': { # Application will contain all paths not in courseware_only_js 'source_filenames': sorted(common_js) + sorted(project_js) + [ 'js/form.ext.js', 'js/my_courses_dropdown.js', 'js/toggle_login_modal.js', 'js/sticky_filter.js', 'js/query-params.js', 'js/src/utility.js', 'js/src/accessibility_tools.js', 'js/src/ie_shim.js', 'js/src/string_utils.js', ], 'output_filename': 'js/lms-application.js', }, 'courseware': { 'source_filenames': courseware_js, 'output_filename': 'js/lms-courseware.js', }, 'base_vendor': { 'source_filenames': base_vendor_js, 'output_filename': 'js/lms-base-vendor.js', }, 'main_vendor': { 'source_filenames': main_vendor_js, 'output_filename': 'js/lms-main_vendor.js', }, 'module-descriptor-js': { 'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'), 'output_filename': 'js/lms-module-descriptors.js', }, 'module-js': { 'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'), 'output_filename': 'js/lms-modules.js', }, 'discussion': { 'source_filenames': discussion_js, 'output_filename': 'js/discussion.js', }, 'staff_grading': { 'source_filenames': staff_grading_js, 'output_filename': 'js/staff_grading.js', }, 'open_ended': { 'source_filenames': open_ended_js, 'output_filename': 'js/open_ended.js', }, 'notes': { 'source_filenames': notes_js, 'output_filename': 'js/notes.js', }, 'instructor_dash': { 'source_filenames': instructor_dash_js, 'output_filename': 'js/instructor_dash.js', }, 'dashboard': { 'source_filenames': dashboard_js, 'output_filename': 'js/dashboard.js' }, 'rwd_header_footer': { 'source_filenames': rwd_header_footer_js, 'output_filename': 'js/rwd_header_footer.js' }, 'student_account': { 'source_filenames': student_account_js, 'output_filename': 'js/student_account.js' }, 'student_profile': { 'source_filenames': student_profile_js, 'output_filename': 'js/student_profile.js' }, } PIPELINE_DISABLE_WRAPPER = True # Compile all coffee files in course data directories if they are out of date. # TODO: Remove this once we move data into Mongo. This is only temporary while # course data directories are still in use. 
if os.path.isdir(DATA_DIR): for course_dir in os.listdir(DATA_DIR): js_dir = DATA_DIR / course_dir / "js" if not os.path.isdir(js_dir): continue for filename in os.listdir(js_dir): if filename.endswith('coffee'): new_filename = os.path.splitext(filename)[0] + ".js" if os.path.exists(js_dir / new_filename): coffee_timestamp = os.stat(js_dir / filename).st_mtime js_timestamp = os.stat(js_dir / new_filename).st_mtime if coffee_timestamp <= js_timestamp: continue os.system("rm %s" % (js_dir / new_filename)) os.system("coffee -c %s" % (js_dir / filename)) PIPELINE_CSS_COMPRESSOR = None PIPELINE_JS_COMPRESSOR = "pipeline.compressors.uglifyjs.UglifyJSCompressor" STATICFILES_IGNORE_PATTERNS = ( "sass/*", "coffee/*", # Symlinks used by js-test-tool "xmodule_js", "common_static", ) PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs' # Setting that will only affect the edX version of django-pipeline until our changes are merged upstream PIPELINE_COMPILE_INPLACE = True ################################# CELERY ###################################### # Message configuration CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_MESSAGE_COMPRESSION = 'gzip' # Results configuration CELERY_IGNORE_RESULT = False CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True # Events configuration CELERY_TRACK_STARTED = True CELERY_SEND_EVENTS = True CELERY_SEND_TASK_SENT_EVENT = True # Exchange configuration CELERY_DEFAULT_EXCHANGE = 'edx.core' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' # Queues configuration HIGH_PRIORITY_QUEUE = 'edx.core.high' DEFAULT_PRIORITY_QUEUE = 'edx.core.default' LOW_PRIORITY_QUEUE = 'edx.core.low' HIGH_MEM_QUEUE = 'edx.core.high_mem' CELERY_QUEUE_HA_POLICY = 'all' CELERY_CREATE_MISSING_QUEUES = True CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE CELERY_QUEUES = { HIGH_PRIORITY_QUEUE: {}, LOW_PRIORITY_QUEUE: {}, DEFAULT_PRIORITY_QUEUE: {}, HIGH_MEM_QUEUE: {}, } # let logging work as configured: CELERYD_HIJACK_ROOT_LOGGER = False ################################ Bulk Email ################################### # Suffix used to construct 'from' email address for bulk emails. # A course-specific identifier is prepended. BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]' # Parameters for breaking down course enrollment into subtasks. BULK_EMAIL_EMAILS_PER_TASK = 100 # Initial delay used for retrying tasks. Additional retries use # longer delays. Value is in seconds. BULK_EMAIL_DEFAULT_RETRY_DELAY = 30 # Maximum number of retries per task for errors that are not related # to throttling. BULK_EMAIL_MAX_RETRIES = 5 # Maximum number of retries per task for errors that are related to # throttling. If this is not set, then there is no cap on such retries. BULK_EMAIL_INFINITE_RETRY_CAP = 1000 # We want Bulk Email running on the high-priority queue, so we define the # routing key that points to it. At the moment, the name is the same. BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE # Flag to indicate if individual email addresses should be logged as they are sent # a bulk email message. BULK_EMAIL_LOG_SENT_EMAILS = False # Delay in seconds to sleep between individual mail messages being sent, # when a bulk email task is retried for rate-related reasons. Choose this # value depending on the number of workers that might be sending email in # parallel, and what the SES rate is. 
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02 ############################## Video ########################################## YOUTUBE = { # YouTube JavaScript API 'API': 'www.youtube.com/iframe_api', # URL to test YouTube availability 'TEST_URL': 'gdata.youtube.com/feeds/api/videos/', # Current youtube api for requesting transcripts. # For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g. 'TEXT_API': { 'url': 'video.google.com/timedtext', 'params': { 'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here', }, }, } ################################### APPS ###################################### INSTALLED_APPS = ( # Standard ones that are always installed... 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.humanize', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'djcelery', 'south', # Database-backed configuration 'config_models', # Monitor the status of services 'service_status', # For asset pipelining 'edxmako', 'pipeline', 'staticfiles', 'static_replace', # Our courseware 'circuit', 'courseware', 'student', 'static_template_view', 'staticbook', 'track', 'eventtracking.django', 'util', 'certificates', 'dashboard', 'instructor', 'instructor_task', 'open_ended_grading', 'psychometrics', 'licenses', 'course_groups', 'bulk_email', # External auth (OpenID, shib) 'external_auth', 'django_openid_auth', # OAuth2 Provider 'provider', 'provider.oauth2', 'oauth2_provider', # For the wiki 'wiki', # The new django-wiki from benjaoming 'django_notify', 'course_wiki', # Our customizations 'mptt', 'sekizai', #'wiki.plugins.attachments', 'wiki.plugins.links', 'wiki.plugins.notifications', 'course_wiki.plugins.markdownedx', # Foldit integration 'foldit', # For testing 'django.contrib.admin', # only used in DEBUG mode 'django_nose', 'debug', # Discussion forums 'django_comment_client', 'django_comment_common', 'notes', # Splash screen 'splash', # Monitoring 'datadog', # User API 'rest_framework', 'user_api', # Shopping cart 'shoppingcart', # Notification preferences setting 'notification_prefs', 'notifier_api', # Different Course Modes 'course_modes', # Student Identity Verification 'verify_student', # Dark-launching languages 'dark_lang', # Microsite configuration 'microsite_configuration', # Student Identity Reverification 'reverification', 'embargo', # Monitoring functionality 'monitoring', # Course action state 'course_action_state', # Additional problem types 'edx_jsme', # Molecular Structure # Country list 'django_countries', # edX Mobile API 'mobile_api', # Surveys 'survey', ) ######################### MARKETING SITE ############################### EDXMKTG_COOKIE_NAME = 'edxloggedin' MKTG_URLS = {} MKTG_URL_LINK_MAP = { 'ABOUT': 'about_edx', 'CONTACT': 'contact', 'FAQ': 'help_edx', 'COURSES': 'courses', 'SCHOOLS':'schools', 'ROOT': 'root',<|fim▁hole|> 'NEWS': 'news', 'PRESS': 'press', 'BLOG': 'edx-blog', 'DONATE': 'donate', # Verified Certificates 'WHAT_IS_VERIFIED_CERT': 'verified-certificate', } ################# Student Verification ################# VERIFY_STUDENT = { "DAYS_GOOD_FOR": 365, # How many days is a verficiation good for? 
} ### This enables the Metrics tab for the Instructor dashboard ########### FEATURES['CLASS_DASHBOARD'] = False if FEATURES.get('CLASS_DASHBOARD'): INSTALLED_APPS += ('class_dashboard',) ######################## CAS authentication ########################### if FEATURES.get('AUTH_USE_CAS'): CAS_SERVER_URL = 'https://provide_your_cas_url_here' AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'django_cas.backends.CASBackend', ) INSTALLED_APPS += ('django_cas',) MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',) ###################### Registration ################################## # For each of the fields, give one of the following values: # - 'required': to display the field, and make it mandatory # - 'optional': to display the field, and make it non-mandatory # - 'hidden': to not display the field REGISTRATION_EXTRA_FIELDS = { 'level_of_education': 'optional', 'gender': 'optional', 'year_of_birth': 'optional', 'mailing_address': 'optional', 'goals': 'optional', 'honor_code': 'required', 'terms_of_service': 'hidden', 'city': 'hidden', 'country': 'hidden', } ########################## CERTIFICATE NAME ######################## CERT_NAME_SHORT = "Certificate" CERT_NAME_LONG = "Certificate of Achievement" ###################### Grade Downloads ###################### GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE GRADES_DOWNLOAD = { 'STORAGE_TYPE': 'localfs', 'BUCKET': 'edx-grades', 'ROOT_PATH': '/tmp/edx-s3/grades', } ######################## PROGRESS SUCCESS BUTTON ############################## # The following fields are available in the URL: {course_id} {student_id} PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}' PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None #### PASSWORD POLICY SETTINGS ##### PASSWORD_MIN_LENGTH = None PASSWORD_MAX_LENGTH = None PASSWORD_COMPLEXITY = {} PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None PASSWORD_DICTIONARY = [] ##################### LinkedIn ##################### INSTALLED_APPS += ('django_openid_auth',) ############################ LinkedIn Integration ############################# INSTALLED_APPS += ('linkedin',) LINKEDIN_API = { 'EMAIL_WHITELIST': [], 'COMPANY_ID': '2746406', } ############################ ORA 2 ############################################ # By default, don't use a file prefix ORA2_FILE_PREFIX = None # Default File Upload Storage bucket and prefix. Used by the FileUpload Service. 
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads' FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments' ##### ACCOUNT LOCKOUT DEFAULT PARAMETERS ##### MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5 MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60 ##### LMS DEADLINE DISPLAY TIME_ZONE ####### TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC' # Source: # http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1 ALL_LANGUAGES = ( [u"aa", u"Afar"], [u"ab", u"Abkhazian"], [u"af", u"Afrikaans"], [u"ak", u"Akan"], [u"sq", u"Albanian"], [u"am", u"Amharic"], [u"ar", u"Arabic"], [u"an", u"Aragonese"], [u"hy", u"Armenian"], [u"as", u"Assamese"], [u"av", u"Avaric"], [u"ae", u"Avestan"], [u"ay", u"Aymara"], [u"az", u"Azerbaijani"], [u"ba", u"Bashkir"], [u"bm", u"Bambara"], [u"eu", u"Basque"], [u"be", u"Belarusian"], [u"bn", u"Bengali"], [u"bh", u"Bihari languages"], [u"bi", u"Bislama"], [u"bs", u"Bosnian"], [u"br", u"Breton"], [u"bg", u"Bulgarian"], [u"my", u"Burmese"], [u"ca", u"Catalan"], [u"ch", u"Chamorro"], [u"ce", u"Chechen"], [u"zh", u"Chinese"], [u"cu", u"Church Slavic"], [u"cv", u"Chuvash"], [u"kw", u"Cornish"], [u"co", u"Corsican"], [u"cr", u"Cree"], [u"cs", u"Czech"], [u"da", u"Danish"], [u"dv", u"Divehi"], [u"nl", u"Dutch"], [u"dz", u"Dzongkha"], [u"en", u"English"], [u"eo", u"Esperanto"], [u"et", u"Estonian"], [u"ee", u"Ewe"], [u"fo", u"Faroese"], [u"fj", u"Fijian"], [u"fi", u"Finnish"], [u"fr", u"French"], [u"fy", u"Western Frisian"], [u"ff", u"Fulah"], [u"ka", u"Georgian"], [u"de", u"German"], [u"gd", u"Gaelic"], [u"ga", u"Irish"], [u"gl", u"Galician"], [u"gv", u"Manx"], [u"el", u"Greek"], [u"gn", u"Guarani"], [u"gu", u"Gujarati"], [u"ht", u"Haitian"], [u"ha", u"Hausa"], [u"he", u"Hebrew"], [u"hz", u"Herero"], [u"hi", u"Hindi"], [u"ho", u"Hiri Motu"], [u"hr", u"Croatian"], [u"hu", u"Hungarian"], [u"ig", u"Igbo"], [u"is", u"Icelandic"], [u"io", u"Ido"], [u"ii", u"Sichuan Yi"], [u"iu", u"Inuktitut"], [u"ie", u"Interlingue"], [u"ia", u"Interlingua"], [u"id", u"Indonesian"], [u"ik", u"Inupiaq"], [u"it", u"Italian"], [u"jv", u"Javanese"], [u"ja", u"Japanese"], [u"kl", u"Kalaallisut"], [u"kn", u"Kannada"], [u"ks", u"Kashmiri"], [u"kr", u"Kanuri"], [u"kk", u"Kazakh"], [u"km", u"Central Khmer"], [u"ki", u"Kikuyu"], [u"rw", u"Kinyarwanda"], [u"ky", u"Kirghiz"], [u"kv", u"Komi"], [u"kg", u"Kongo"], [u"ko", u"Korean"], [u"kj", u"Kuanyama"], [u"ku", u"Kurdish"], [u"lo", u"Lao"], [u"la", u"Latin"], [u"lv", u"Latvian"], [u"li", u"Limburgan"], [u"ln", u"Lingala"], [u"lt", u"Lithuanian"], [u"lb", u"Luxembourgish"], [u"lu", u"Luba-Katanga"], [u"lg", u"Ganda"], [u"mk", u"Macedonian"], [u"mh", u"Marshallese"], [u"ml", u"Malayalam"], [u"mi", u"Maori"], [u"mr", u"Marathi"], [u"ms", u"Malay"], [u"mg", u"Malagasy"], [u"mt", u"Maltese"], [u"mn", u"Mongolian"], [u"na", u"Nauru"], [u"nv", u"Navajo"], [u"nr", u"Ndebele, South"], [u"nd", u"Ndebele, North"], [u"ng", u"Ndonga"], [u"ne", u"Nepali"], [u"nn", u"Norwegian Nynorsk"], [u"nb", u"Bokmål, Norwegian"], [u"no", u"Norwegian"], [u"ny", u"Chichewa"], [u"oc", u"Occitan"], [u"oj", u"Ojibwa"], [u"or", u"Oriya"], [u"om", u"Oromo"], [u"os", u"Ossetian"], [u"pa", u"Panjabi"], [u"fa", u"Persian"], [u"pi", u"Pali"], [u"pl", u"Polish"], [u"pt", u"Portuguese"], [u"ps", u"Pushto"], [u"qu", u"Quechua"], [u"rm", u"Romansh"], [u"ro", u"Romanian"], [u"rn", u"Rundi"], [u"ru", u"Russian"], [u"sg", u"Sango"], [u"sa", u"Sanskrit"], [u"si", u"Sinhala"], [u"sk", u"Slovak"], [u"sl", u"Slovenian"], [u"se", u"Northern Sami"], [u"sm", u"Samoan"], [u"sn", 
u"Shona"], [u"sd", u"Sindhi"], [u"so", u"Somali"], [u"st", u"Sotho, Southern"], [u"es", u"Spanish"], [u"sc", u"Sardinian"], [u"sr", u"Serbian"], [u"ss", u"Swati"], [u"su", u"Sundanese"], [u"sw", u"Swahili"], [u"sv", u"Swedish"], [u"ty", u"Tahitian"], [u"ta", u"Tamil"], [u"tt", u"Tatar"], [u"te", u"Telugu"], [u"tg", u"Tajik"], [u"tl", u"Tagalog"], [u"th", u"Thai"], [u"bo", u"Tibetan"], [u"ti", u"Tigrinya"], [u"to", u"Tonga (Tonga Islands)"], [u"tn", u"Tswana"], [u"ts", u"Tsonga"], [u"tk", u"Turkmen"], [u"tr", u"Turkish"], [u"tw", u"Twi"], [u"ug", u"Uighur"], [u"uk", u"Ukrainian"], [u"ur", u"Urdu"], [u"uz", u"Uzbek"], [u"ve", u"Venda"], [u"vi", u"Vietnamese"], [u"vo", u"Volapük"], [u"cy", u"Welsh"], [u"wa", u"Walloon"], [u"wo", u"Wolof"], [u"xh", u"Xhosa"], [u"yi", u"Yiddish"], [u"yo", u"Yoruba"], [u"za", u"Zhuang"], [u"zu", u"Zulu"] ) ### Apps only installed in some instances OPTIONAL_APPS = ( 'mentoring', # edx-ora2 'submissions', 'openassessment', 'openassessment.assessment', 'openassessment.fileupload', 'openassessment.workflow', 'openassessment.xblock', # edxval 'edxval' ) for app_name in OPTIONAL_APPS: # First attempt to only find the module rather than actually importing it, # to avoid circular references - only try to import if it can't be found # by find_module, which doesn't work with import hooks try: imp.find_module(app_name) except ImportError: try: __import__(app_name) except ImportError: continue INSTALLED_APPS += (app_name,) # Stub for third_party_auth options. # See common/djangoapps/third_party_auth/settings.py for configuration details. THIRD_PARTY_AUTH = {} ### ADVANCED_SECURITY_CONFIG # Empty by default ADVANCED_SECURITY_CONFIG = {} ### External auth usage -- prefixes for ENROLLMENT_DOMAIN SHIBBOLETH_DOMAIN_PREFIX = 'shib:' OPENID_DOMAIN_PREFIX = 'openid:' ### Analytics Data API + Dashboard (Insights) settings ANALYTICS_DATA_URL = "" ANALYTICS_DATA_TOKEN = "" ANALYTICS_DASHBOARD_URL = "" ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights" # REGISTRATION CODES DISPLAY INFORMATION SUBTITUTIONS IN THE INVOICE ATTACHMENT INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration" INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput directions on how people\nbuying registration codes" # Country code overrides # Used by django-countries COUNTRIES_OVERRIDE = { "TW": _("Taiwan"), } # which access.py permission name to check in order to determine if a course is visible in # the course catalog. We default this to the legacy permission 'see_exists'. COURSE_CATALOG_VISIBILITY_PERMISSION = 'see_exists' # which access.py permission name to check in order to determine if a course about page is # visible. We default this to the legacy permission 'see_exists'. COURSE_ABOUT_VISIBILITY_PERMISSION = 'see_exists'<|fim▁end|>
'TOS': 'tos', 'HONOR': 'honor', 'PRIVACY': 'privacy_edx', 'JOBS': 'jobs',
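Note on the CoffeeScript compile loop in the settings row above: it shells out with os.system and raw string interpolation, so a course directory containing spaces or shell metacharacters would break the rm/coffee commands. A minimal sketch of the same out-of-date check done without a shell (hypothetical helper, not part of the settings file above):

import os
import subprocess

def compile_stale_coffee(js_dir):
    # Recompile foo.coffee to foo.js only when the .coffee source is newer,
    # mirroring the coffee_timestamp <= js_timestamp skip in the loop above.
    for filename in os.listdir(js_dir):
        if not filename.endswith('coffee'):
            continue
        src = os.path.join(js_dir, filename)
        dst = os.path.splitext(src)[0] + '.js'
        if os.path.exists(dst) and os.path.getmtime(src) <= os.path.getmtime(dst):
            continue
        # An argument list avoids the shell entirely, so odd path names are safe;
        # coffee -c overwrites the stale .js, so no separate rm is needed.
        subprocess.check_call(['coffee', '-c', src])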
<|file_name|>file.js<|end_file_name|><|fim▁begin|>/** * @file * Provides JavaScript additions to the managed file field type. * * This file provides progress bar support (if available), popup windows for * file previews, and disabling of other file fields during Ajax uploads (which * prevents separate file fields from accidentally uploading files). */ (function ($, Drupal) { "use strict"; /** * Attach behaviors to managed file element upload fields. */ Drupal.behaviors.fileValidateAutoAttach = { attach: function (context, settings) { var $context = $(context); var elements; function initFileValidation(selector) { $context.find(selector) .once('fileValidate') .on('change.fileValidate', { extensions: elements[selector] }, Drupal.file.validateExtension); } if (settings.file && settings.file.elements) { elements = settings.file.elements; Object.keys(elements).forEach(initFileValidation); } }, detach: function (context, settings, trigger) { var $context = $(context); var elements; function removeFileValidation(selector) { $context.find(selector) .removeOnce('fileValidate') .off('change.fileValidate', Drupal.file.validateExtension); } if (trigger === 'unload' && settings.file && settings.file.elements) { elements = settings.file.elements; Object.keys(elements).forEach(removeFileValidation); } } }; /** * Attach behaviors to managed file element upload fields. */ Drupal.behaviors.fileAutoUpload = {<|fim▁hole|> $(context).find('input[type="file"]').once('auto-file-upload').on('change.autoFileUpload', Drupal.file.triggerUploadButton); }, detach: function (context, setting, trigger) { if (trigger === 'unload') { $(context).find('input[type="file"]').removeOnce('auto-file-upload').off('.autoFileUpload'); } } }; /** * Attach behaviors to the file upload and remove buttons. */ Drupal.behaviors.fileButtons = { attach: function (context) { var $context = $(context); $context.find('.form-submit').on('mousedown', Drupal.file.disableFields); $context.find('.form-managed-file .form-submit').on('mousedown', Drupal.file.progressBar); }, detach: function (context) { var $context = $(context); $context.find('.form-submit').off('mousedown', Drupal.file.disableFields); $context.find('.form-managed-file .form-submit').off('mousedown', Drupal.file.progressBar); } }; /** * Attach behaviors to links within managed file elements. */ Drupal.behaviors.filePreviewLinks = { attach: function (context) { $(context).find('div.form-managed-file .file a, .file-widget .file a').on('click', Drupal.file.openInNewWindow); }, detach: function (context) { $(context).find('div.form-managed-file .file a, .file-widget .file a').off('click', Drupal.file.openInNewWindow); } }; /** * File upload utility functions. */ Drupal.file = Drupal.file || { /** * Client-side file input validation of file extensions. */ validateExtension: function (event) { event.preventDefault(); // Remove any previous errors. $('.file-upload-js-error').remove(); // Add client side validation for the input[type=file]. var extensionPattern = event.data.extensions.replace(/,\s*/g, '|'); if (extensionPattern.length > 1 && this.value.length > 0) { var acceptableMatch = new RegExp('\\.(' + extensionPattern + ')$', 'gi'); if (!acceptableMatch.test(this.value)) { var error = Drupal.t("The selected file %filename cannot be uploaded. Only files with the following extensions are allowed: %extensions.", { // According to the specifications of HTML5, a file upload control // should not reveal the real local path to the file that a user // has selected. 
Some web browsers implement this restriction by // replacing the local path with "C:\fakepath\", which can cause // confusion by leaving the user thinking perhaps Drupal could not // find the file because it messed up the file path. To avoid this // confusion, therefore, we strip out the bogus fakepath string. '%filename': this.value.replace('C:\\fakepath\\', ''), '%extensions': extensionPattern.replace(/\|/g, ', ') }); $(this).closest('div.form-managed-file').prepend('<div class="messages messages--error file-upload-js-error" aria-live="polite">' + error + '</div>'); this.value = ''; // Cancel all other change event handlers. event.stopImmediatePropagation(); } } }, /** * Trigger the upload_button mouse event to auto-upload as a managed file. */ triggerUploadButton: function (event) { $(event.target).closest('.form-managed-file').find('.form-submit').trigger('mousedown'); }, /** * Prevent file uploads when using buttons not intended to upload. */ disableFields: function (event) { var $clickedButton = $(this); // Only disable upload fields for Ajax buttons. if (!$clickedButton.hasClass('ajax-processed')) { return; } // Check if we're working with an "Upload" button. var $enabledFields = []; if ($clickedButton.closest('div.form-managed-file').length > 0) { $enabledFields = $clickedButton.closest('div.form-managed-file').find('input.form-file'); } // Temporarily disable upload fields other than the one we're currently // working with. Filter out fields that are already disabled so that they // do not get enabled when we re-enable these fields at the end of behavior // processing. Re-enable in a setTimeout set to a relatively short amount // of time (1 second). All the other mousedown handlers (like Drupal's Ajax // behaviors) are executed before any timeout functions are called, so we // don't have to worry about the fields being re-enabled too soon. // @todo If the previous sentence is true, why not set the timeout to 0? var $fieldsToTemporarilyDisable = $('div.form-managed-file input.form-file').not($enabledFields).not(':disabled'); $fieldsToTemporarilyDisable.prop('disabled', true); setTimeout(function () { $fieldsToTemporarilyDisable.prop('disabled', false); }, 1000); }, /** * Add progress bar support if possible. */ progressBar: function (event) { var $clickedButton = $(this); var $progressId = $clickedButton.closest('div.form-managed-file').find('input.file-progress'); if ($progressId.length) { var originalName = $progressId.attr('name'); // Replace the name with the required identifier. $progressId.attr('name', originalName.match(/APC_UPLOAD_PROGRESS|UPLOAD_IDENTIFIER/)[0]); // Restore the original name after the upload begins. setTimeout(function () { $progressId.attr('name', originalName); }, 1000); } // Show the progress bar if the upload takes longer than half a second. setTimeout(function () { $clickedButton.closest('div.form-managed-file').find('div.ajax-progress-bar').slideDown(); }, 500); }, /** * Open links to files within forms in a new window. */ openInNewWindow: function (event) { event.preventDefault(); $(this).attr('target', '_blank'); window.open(this.href, 'filePreview', 'toolbar=0,scrollbars=1,location=1,statusbar=1,menubar=0,resizable=1,width=500,height=550'); } }; })(jQuery, Drupal);<|fim▁end|>
attach: function (context) {
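The validateExtension handler in the row above builds a case-insensitive regex of the form \.(ext1|ext2)$ from a comma-separated extension string. A rough Python equivalent of that check, for readers following the logic outside the jQuery handler (illustrative only; the real behavior is the JavaScript above):

import re

def is_allowed_filename(filename, extensions):
    # extensions is a comma-separated list such as "jpg, png, gif",
    # matching the event.data.extensions string used above.
    pattern = re.sub(r',\s*', '|', extensions)
    return re.search(r'\.(%s)$' % pattern, filename, re.IGNORECASE) is not None

# is_allowed_filename('photo.JPG', 'jpg, png')  -> True
# is_allowed_filename('script.exe', 'jpg, png') -> False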
<|file_name|>issue-28105.rs<|end_file_name|><|fim▁begin|>// Make sure that a continue span actually contains the keyword.<|fim▁hole|> continue //~ ERROR `continue` outside of a loop ; break //~ ERROR `break` outside of a loop ; }<|fim▁end|>
fn main() {
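Rows like this one are easiest to read by splicing the completion back into the <|fim▁hole|> slot. Doing that here reconstructs the original compile-fail test (line breaks and indentation approximated, since the dataset flattens them):

// Make sure that a continue span actually contains the keyword.
fn main() {
    continue //~ ERROR `continue` outside of a loop
    ;
    break //~ ERROR `break` outside of a loop
    ;
}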
<|file_name|>qgscurveeditorwidget.cpp<|end_file_name|><|fim▁begin|>/*************************************************************************** qgscurveeditorwidget.cpp ------------------------ begin : February 2017<|fim▁hole|> *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgscurveeditorwidget.h" #include "qgsvectorlayer.h" #include <QPainter> #include <QVBoxLayout> #include <QMouseEvent> #include <algorithm> // QWT Charting widget #include <qwt_global.h> #include <qwt_plot_canvas.h> #include <qwt_plot.h> #include <qwt_plot_curve.h> #include <qwt_plot_grid.h> #include <qwt_plot_marker.h> #include <qwt_plot_picker.h> #include <qwt_picker_machine.h> #include <qwt_plot_layout.h> #include <qwt_symbol.h> #include <qwt_legend.h> #include <qwt_plot_renderer.h> #include <qwt_plot_histogram.h> QgsCurveEditorWidget::QgsCurveEditorWidget( QWidget *parent, const QgsCurveTransform &transform ) : QWidget( parent ) , mCurve( transform ) { mPlot = new QwtPlot(); mPlot->setMinimumSize( QSize( 0, 100 ) ); mPlot->setAxisScale( QwtPlot::yLeft, 0, 1 ); mPlot->setAxisScale( QwtPlot::yRight, 0, 1 ); mPlot->setAxisScale( QwtPlot::xBottom, 0, 1 ); mPlot->setAxisScale( QwtPlot::xTop, 0, 1 ); QVBoxLayout *vlayout = new QVBoxLayout(); vlayout->addWidget( mPlot ); setLayout( vlayout ); // hide the ugly canvas frame mPlot->setFrameStyle( QFrame::NoFrame ); QFrame *plotCanvasFrame = dynamic_cast<QFrame *>( mPlot->canvas() ); if ( plotCanvasFrame ) plotCanvasFrame->setFrameStyle( QFrame::NoFrame ); mPlot->enableAxis( QwtPlot::yLeft, false ); mPlot->enableAxis( QwtPlot::xBottom, false ); // add a grid QwtPlotGrid *grid = new QwtPlotGrid(); QwtScaleDiv gridDiv( 0.0, 1.0, QList<double>(), QList<double>(), QList<double>() << 0.2 << 0.4 << 0.6 << 0.8 ); grid->setXDiv( gridDiv ); grid->setYDiv( gridDiv ); grid->setPen( QPen( QColor( 0, 0, 0, 50 ) ) ); grid->attach( mPlot ); mPlotCurve = new QwtPlotCurve(); mPlotCurve->setTitle( QStringLiteral( "Curve" ) ); mPlotCurve->setPen( QPen( QColor( 30, 30, 30 ), 0.0 ) ), mPlotCurve->setRenderHint( QwtPlotItem::RenderAntialiased, true ); mPlotCurve->attach( mPlot ); mPlotFilter = new QgsCurveEditorPlotEventFilter( mPlot ); connect( mPlotFilter, &QgsCurveEditorPlotEventFilter::mousePress, this, &QgsCurveEditorWidget::plotMousePress ); connect( mPlotFilter, &QgsCurveEditorPlotEventFilter::mouseRelease, this, &QgsCurveEditorWidget::plotMouseRelease ); connect( mPlotFilter, &QgsCurveEditorPlotEventFilter::mouseMove, this, &QgsCurveEditorWidget::plotMouseMove ); mPlotCurve->setVisible( true ); updatePlot(); } QgsCurveEditorWidget::~QgsCurveEditorWidget() { if ( mGatherer && mGatherer->isRunning() ) { connect( mGatherer.get(), &QgsHistogramValuesGatherer::finished, mGatherer.get(), &QgsHistogramValuesGatherer::deleteLater ); mGatherer->stop(); ( void )mGatherer.release(); } } void QgsCurveEditorWidget::setCurve( const QgsCurveTransform &curve ) { mCurve = curve; updatePlot(); emit changed(); } void QgsCurveEditorWidget::setHistogramSource( const QgsVectorLayer *layer, const QString &expression ) { if ( !mGatherer ) { mGatherer.reset( new QgsHistogramValuesGatherer() ); connect( mGatherer.get(), 
&QgsHistogramValuesGatherer::calculatedHistogram, this, [ = ] { mHistogram.reset( new QgsHistogram( mGatherer->histogram() ) ); updateHistogram(); } ); } bool changed = mGatherer->layer() != layer || mGatherer->expression() != expression; if ( changed ) { mGatherer->setExpression( expression ); mGatherer->setLayer( layer ); mGatherer->start(); if ( mGatherer->isRunning() ) { //stop any currently running task mGatherer->stop(); while ( mGatherer->isRunning() ) { QCoreApplication::processEvents(); } } mGatherer->start(); } else { updateHistogram(); } } void QgsCurveEditorWidget::setMinHistogramValueRange( double minValueRange ) { mMinValueRange = minValueRange; updateHistogram(); } void QgsCurveEditorWidget::setMaxHistogramValueRange( double maxValueRange ) { mMaxValueRange = maxValueRange; updateHistogram(); } void QgsCurveEditorWidget::keyPressEvent( QKeyEvent *event ) { if ( event->key() == Qt::Key_Delete || event->key() == Qt::Key_Backspace ) { QList< QgsPointXY > cp = mCurve.controlPoints(); if ( mCurrentPlotMarkerIndex > 0 && mCurrentPlotMarkerIndex < cp.count() - 1 ) { cp.removeAt( mCurrentPlotMarkerIndex ); mCurve.setControlPoints( cp ); updatePlot(); emit changed(); } } } void QgsCurveEditorWidget::plotMousePress( QPointF point ) { mCurrentPlotMarkerIndex = findNearestControlPoint( point ); if ( mCurrentPlotMarkerIndex < 0 ) { // add a new point mCurve.addControlPoint( point.x(), point.y() ); mCurrentPlotMarkerIndex = findNearestControlPoint( point ); emit changed(); } updatePlot(); } int QgsCurveEditorWidget::findNearestControlPoint( QPointF point ) const { double minDist = 3.0 / mPlot->width(); int currentPlotMarkerIndex = -1; QList< QgsPointXY > controlPoints = mCurve.controlPoints(); for ( int i = 0; i < controlPoints.count(); ++i ) { QgsPointXY currentPoint = controlPoints.at( i ); double currentDist; currentDist = std::pow( point.x() - currentPoint.x(), 2.0 ) + std::pow( point.y() - currentPoint.y(), 2.0 ); if ( currentDist < minDist ) { minDist = currentDist; currentPlotMarkerIndex = i; } } return currentPlotMarkerIndex; } void QgsCurveEditorWidget::plotMouseRelease( QPointF ) { } void QgsCurveEditorWidget::plotMouseMove( QPointF point ) { if ( mCurrentPlotMarkerIndex < 0 ) return; QList< QgsPointXY > cp = mCurve.controlPoints(); bool removePoint = false; if ( mCurrentPlotMarkerIndex == 0 ) { point.setX( std::min( point.x(), cp.at( 1 ).x() - 0.01 ) ); } else { removePoint = point.x() <= cp.at( mCurrentPlotMarkerIndex - 1 ).x(); } if ( mCurrentPlotMarkerIndex == cp.count() - 1 ) { point.setX( std::max( point.x(), cp.at( mCurrentPlotMarkerIndex - 1 ).x() + 0.01 ) ); removePoint = false; } else { removePoint = removePoint || point.x() >= cp.at( mCurrentPlotMarkerIndex + 1 ).x(); } if ( removePoint ) { cp.removeAt( mCurrentPlotMarkerIndex ); mCurrentPlotMarkerIndex = -1; } else { cp[ mCurrentPlotMarkerIndex ] = QgsPointXY( point.x(), point.y() ); } mCurve.setControlPoints( cp ); updatePlot(); emit changed(); } void QgsCurveEditorWidget::addPlotMarker( double x, double y, bool isSelected ) { QColor borderColor( 0, 0, 0 ); QColor brushColor = isSelected ? borderColor : QColor( 255, 255, 255, 0 ); QwtPlotMarker *marker = new QwtPlotMarker(); marker->setSymbol( new QwtSymbol( QwtSymbol::Ellipse, QBrush( brushColor ), QPen( borderColor, isSelected ? 
2 : 1 ), QSize( 8, 8 ) ) ); marker->setValue( x, y ); marker->attach( mPlot ); marker->setRenderHint( QwtPlotItem::RenderAntialiased, true ); mMarkers << marker; } void QgsCurveEditorWidget::updateHistogram() { if ( !mHistogram ) return; //draw histogram QBrush histoBrush( QColor( 0, 0, 0, 70 ) ); delete mPlotHistogram; mPlotHistogram = createPlotHistogram( histoBrush ); QVector<QwtIntervalSample> dataHisto; int bins = 40; QList<double> edges = mHistogram->binEdges( bins ); QList<int> counts = mHistogram->counts( bins ); // scale counts to 0->1 double max = *std::max_element( counts.constBegin(), counts.constEnd() ); // scale bin edges to fit in 0->1 range if ( !qgsDoubleNear( mMinValueRange, mMaxValueRange ) ) { std::transform( edges.begin(), edges.end(), edges.begin(), [this]( double d ) -> double { return ( d - mMinValueRange ) / ( mMaxValueRange - mMinValueRange ); } ); } for ( int bin = 0; bin < bins; ++bin ) { double binValue = counts.at( bin ) / max; double upperEdge = edges.at( bin + 1 ); dataHisto << QwtIntervalSample( binValue, edges.at( bin ), upperEdge ); } mPlotHistogram->setSamples( dataHisto ); mPlotHistogram->attach( mPlot ); mPlot->replot(); } void QgsCurveEditorWidget::updatePlot() { // remove existing markers Q_FOREACH ( QwtPlotMarker *marker, mMarkers ) { marker->detach(); delete marker; } mMarkers.clear(); QPolygonF curvePoints; QVector< double > x; int i = 0; Q_FOREACH ( const QgsPointXY &point, mCurve.controlPoints() ) { x << point.x(); addPlotMarker( point.x(), point.y(), mCurrentPlotMarkerIndex == i ); i++; } //add extra intermediate points for ( double p = 0; p <= 1.0; p += 0.01 ) { x << p; } std::sort( x.begin(), x.end() ); QVector< double > y = mCurve.y( x ); for ( int j = 0; j < x.count(); ++j ) { curvePoints << QPointF( x.at( j ), y.at( j ) ); } mPlotCurve->setSamples( curvePoints ); mPlot->replot(); } QwtPlotHistogram *QgsCurveEditorWidget::createPlotHistogram( const QBrush &brush, const QPen &pen ) const { QwtPlotHistogram *histogram = new QwtPlotHistogram( QString() ); histogram->setBrush( brush ); if ( pen != Qt::NoPen ) { histogram->setPen( pen ); } else if ( brush.color().lightness() > 200 ) { QPen p; p.setColor( brush.color().darker( 150 ) ); p.setWidth( 0 ); p.setCosmetic( true ); histogram->setPen( p ); } else { histogram->setPen( QPen( Qt::NoPen ) ); } return histogram; } /// @cond PRIVATE QgsCurveEditorPlotEventFilter::QgsCurveEditorPlotEventFilter( QwtPlot *plot ) : QObject( plot ) , mPlot( plot ) { mPlot->canvas()->installEventFilter( this ); } bool QgsCurveEditorPlotEventFilter::eventFilter( QObject *object, QEvent *event ) { if ( !mPlot->isEnabled() ) return QObject::eventFilter( object, event ); switch ( event->type() ) { case QEvent::MouseButtonPress: { const QMouseEvent *mouseEvent = static_cast<QMouseEvent * >( event ); if ( mouseEvent->button() == Qt::LeftButton ) { emit mousePress( mapPoint( mouseEvent->pos() ) ); } break; } case QEvent::MouseMove: { const QMouseEvent *mouseEvent = static_cast<QMouseEvent * >( event ); if ( mouseEvent->buttons() & Qt::LeftButton ) { // only emit when button pressed emit mouseMove( mapPoint( mouseEvent->pos() ) ); } break; } case QEvent::MouseButtonRelease: { const QMouseEvent *mouseEvent = static_cast<QMouseEvent * >( event ); if ( mouseEvent->button() == Qt::LeftButton ) { emit mouseRelease( mapPoint( mouseEvent->pos() ) ); } break; } default: break; } return QObject::eventFilter( object, event ); } QPointF QgsCurveEditorPlotEventFilter::mapPoint( QPointF point ) const { if ( !mPlot ) return QPointF(); 
return QPointF( mPlot->canvasMap( QwtPlot::xBottom ).invTransform( point.x() ), mPlot->canvasMap( QwtPlot::yLeft ).invTransform( point.y() ) ); } ///@endcond<|fim▁end|>
copyright : (C) 2017 by Nyall Dawson email : nyall dot dawson at gmail dot com
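findNearestControlPoint in the row above keeps the index of the control point with the smallest squared distance to the click, seeded with a width-dependent threshold (note that in the C++ the 3.0 / width threshold is compared directly against squared distances). The same search, sketched in Python for clarity:

def find_nearest_control_point(point, control_points, plot_width):
    # Squared-distance scan mirroring the C++ above; returns -1 when no
    # control point falls inside the initial threshold, which the caller
    # treats as "add a new point here".
    min_dist = 3.0 / plot_width
    nearest = -1
    for i, cp in enumerate(control_points):
        d = (point[0] - cp[0]) ** 2 + (point[1] - cp[1]) ** 2
        if d < min_dist:
            min_dist = d
            nearest = i
    return nearest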
<|file_name|>mode-ruby.js<|end_file_name|><|fim▁begin|>/* ***** BEGIN LICENSE BLOCK ***** * Distributed under the BSD license: * * Copyright (c) 2010, Ajax.org B.V. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Ajax.org B.V. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ***** END LICENSE BLOCK ***** */ __ace_shadowed__.define('ace/mode/ruby', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text', 'ace/tokenizer', 'ace/mode/ruby_highlight_rules', 'ace/mode/matching_brace_outdent', 'ace/range', 'ace/mode/folding/coffee'], function(require, exports, module) { var oop = require("../lib/oop"); var TextMode = require("./text").Mode; var Tokenizer = require("../tokenizer").Tokenizer; var RubyHighlightRules = require("./ruby_highlight_rules").RubyHighlightRules; var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent; var Range = require("../range").Range; var FoldMode = require("./folding/coffee").FoldMode; var Mode = function() { this.HighlightRules = RubyHighlightRules; this.$outdent = new MatchingBraceOutdent(); this.foldingRules = new FoldMode(); }; oop.inherits(Mode, TextMode); (function() { this.lineCommentStart = "#"; this.getNextLineIndent = function(state, line, tab) { var indent = this.$getIndent(line); var tokenizedLine = this.getTokenizer().getLineTokens(line, state); var tokens = tokenizedLine.tokens; if (tokens.length && tokens[tokens.length-1].type == "comment") { return indent; } if (state == "start") { var match = line.match(/^.*[\{\(\[]\s*$/); var startingClassOrMethod = line.match(/^\s*(class|def|module)\s.*$/); var startingDoBlock = line.match(/.*do(\s*|\s+\|.*\|\s*)$/); var startingConditional = line.match(/^\s*(if|else)\s*/) if (match || startingClassOrMethod || startingDoBlock || startingConditional) { indent += tab; } } return indent; }; this.checkOutdent = function(state, line, input) { return /^\s+end$/.test(line + input) || /^\s+}$/.test(line + input) || /^\s+else$/.test(line + input); }; this.autoOutdent = function(state, doc, row) { var indent = this.$getIndent(doc.getLine(row)); var tab = doc.getTabString(); if (indent.slice(-tab.length) == tab) doc.remove(new Range(row, indent.length-tab.length, 
row, indent.length)); }; this.$id = "ace/mode/ruby"; }).call(Mode.prototype); exports.Mode = Mode; }); __ace_shadowed__.define('ace/mode/ruby_highlight_rules', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/text_highlight_rules'], function(require, exports, module) { var oop = require("../lib/oop"); var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules; var constantOtherSymbol = exports.constantOtherSymbol = { token : "constant.other.symbol.ruby", // symbol regex : "[:](?:[A-Za-z_]|[@$](?=[a-zA-Z0-9_]))[a-zA-Z0-9_]*[!=?]?" }; var qString = exports.qString = { token : "string", // single line regex : "['](?:(?:\\\\.)|(?:[^'\\\\]))*?[']" }; var qqString = exports.qqString = { token : "string", // single line regex : '["](?:(?:\\\\.)|(?:[^"\\\\]))*?["]' }; var tString = exports.tString = { token : "string", // backtick string regex : "[`](?:(?:\\\\.)|(?:[^'\\\\]))*?[`]" }; var constantNumericHex = exports.constantNumericHex = { token : "constant.numeric", // hex regex : "0[xX][0-9a-fA-F](?:[0-9a-fA-F]|_(?=[0-9a-fA-F]))*\\b" }; var constantNumericFloat = exports.constantNumericFloat = { token : "constant.numeric", // float regex : "[+-]?\\d(?:\\d|_(?=\\d))*(?:(?:\\.\\d(?:\\d|_(?=\\d))*)?(?:[eE][+-]?\\d+)?)?\\b" }; var RubyHighlightRules = function() { var builtinFunctions = ( "abort|Array|assert|assert_equal|assert_not_equal|assert_same|assert_not_same|" + "assert_nil|assert_not_nil|assert_match|assert_no_match|assert_in_delta|assert_throws|" + "assert_raise|assert_nothing_raised|assert_instance_of|assert_kind_of|assert_respond_to|" + "assert_operator|assert_send|assert_difference|assert_no_difference|assert_recognizes|" + "assert_generates|assert_response|assert_redirected_to|assert_template|assert_select|" + "assert_select_email|assert_select_rjs|assert_select_encoded|css_select|at_exit|" + "attr|attr_writer|attr_reader|attr_accessor|attr_accessible|autoload|binding|block_given?|callcc|" + "caller|catch|chomp|chomp!|chop|chop!|defined?|delete_via_redirect|eval|exec|exit|" + "exit!|fail|Float|flunk|follow_redirect!|fork|form_for|form_tag|format|gets|global_variables|gsub|" + "gsub!|get_via_redirect|host!|https?|https!|include|Integer|lambda|link_to|" + "link_to_unless_current|link_to_function|link_to_remote|load|local_variables|loop|open|open_session|" + "p|print|printf|proc|putc|puts|post_via_redirect|put_via_redirect|raise|rand|" + "raw|readline|readlines|redirect?|request_via_redirect|require|scan|select|" + "set_trace_func|sleep|split|sprintf|srand|String|stylesheet_link_tag|syscall|system|sub|sub!|test|" + "throw|trace_var|trap|untrace_var|atan2|cos|exp|frexp|ldexp|log|log10|sin|sqrt|tan|" + "render|javascript_include_tag|csrf_meta_tag|label_tag|text_field_tag|submit_tag|check_box_tag|" + "content_tag|radio_button_tag|text_area_tag|password_field_tag|hidden_field_tag|" + "fields_for|select_tag|options_for_select|options_from_collection_for_select|collection_select|" + "time_zone_select|select_date|select_time|select_datetime|date_select|time_select|datetime_select|" + "select_year|select_month|select_day|select_hour|select_minute|select_second|file_field_tag|" + "file_field|respond_to|skip_before_filter|around_filter|after_filter|verify|" + "protect_from_forgery|rescue_from|helper_method|redirect_to|before_filter|" + "send_data|send_file|validates_presence_of|validates_uniqueness_of|validates_length_of|" + "validates_format_of|validates_acceptance_of|validates_associated|validates_exclusion_of|" + 
"validates_inclusion_of|validates_numericality_of|validates_with|validates_each|" + "authenticate_or_request_with_http_basic|authenticate_or_request_with_http_digest|" + "filter_parameter_logging|match|get|post|resources|redirect|scope|assert_routing|" + "translate|localize|extract_locale_from_tld|caches_page|expire_page|caches_action|expire_action|" + "cache|expire_fragment|expire_cache_for|observe|cache_sweeper|" + "has_many|has_one|belongs_to|has_and_belongs_to_many" ); var keywords = ( "alias|and|BEGIN|begin|break|case|class|def|defined|do|else|elsif|END|end|ensure|" + "__FILE__|finally|for|gem|if|in|__LINE__|module|next|not|or|private|protected|public|" + "redo|rescue|retry|return|super|then|undef|unless|until|when|while|yield" ); var buildinConstants = ( "true|TRUE|false|FALSE|nil|NIL|ARGF|ARGV|DATA|ENV|RUBY_PLATFORM|RUBY_RELEASE_DATE|" + "RUBY_VERSION|STDERR|STDIN|STDOUT|TOPLEVEL_BINDING" ); var builtinVariables = ( "\$DEBUG|\$defout|\$FILENAME|\$LOAD_PATH|\$SAFE|\$stdin|\$stdout|\$stderr|\$VERBOSE|" + "$!|root_url|flash|session|cookies|params|request|response|logger|self" ); var keywordMapper = this.$keywords = this.createKeywordMapper({ "keyword": keywords, "constant.language": buildinConstants, "variable.language": builtinVariables, "support.function": builtinFunctions, "invalid.deprecated": "debugger" // TODO is this a remnant from js mode? }, "identifier"); this.$rules = { "start" : [ { token : "comment", regex : "#.*$" }, { token : "comment", // multi line comment regex : "^=begin(?:$|\\s.*$)", next : "comment" }, { token : "string.regexp", regex : "[/](?:(?:\\[(?:\\\\]|[^\\]])+\\])|(?:\\\\/|[^\\]/]))*[/]\\w*\\s*(?=[).,;]|$)" }, qString, qqString, tString, { token : "text", // namespaces aren't symbols regex : "::" }, { token : "variable.instance", // instance variable regex : "@{1,2}[a-zA-Z_\\d]+" }, { token : "support.class", // class name regex : "[A-Z][a-zA-Z_\\d]+" }, constantOtherSymbol, constantNumericHex, constantNumericFloat, { token : "constant.language.boolean", regex : "(?:true|false)\\b" }, { token : keywordMapper, regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b" }, { token : "punctuation.separator.key-value", regex : "=>" }, { stateName: "heredoc", onMatch : function(value, currentState, stack) { var next = value[2] == '-' ? 
"indentedHeredoc" : "heredoc"; var tokens = value.split(this.splitRegex); stack.push(next, tokens[3]); return [ {type:"constant", value: tokens[1]}, {type:"string", value: tokens[2]}, {type:"support.class", value: tokens[3]}, {type:"string", value: tokens[4]} ]; }, regex : "(<<-?)(['\"`]?)([\\w]+)(['\"`]?)", rules: { heredoc: [{ onMatch: function(value, currentState, stack) { if (value === stack[1]) { stack.shift(); stack.shift(); this.next = stack[0] || "start"; return "support.class"; } this.next = ""; return "string"; }, regex: ".*$", next: "start" }], indentedHeredoc: [{ token: "string", regex: "^ +" }, { onMatch: function(value, currentState, stack) { if (value === stack[1]) { stack.shift(); stack.shift(); this.next = stack[0] || "start"; return "support.class"; } this.next = ""; return "string"; }, regex: ".*$", next: "start" }] } }, { regex : "$", token : "empty", next : function(currentState, stack) { if (stack[0] === "heredoc" || stack[0] === "indentedHeredoc") return stack[0]; return currentState; } }, { token : "keyword.operator", regex : "!|\\$|%|&|\\*|\\-\\-|\\-|\\+\\+|\\+|~|===|==|=|!=|!==|<=|>=|<<=|>>=|>>>=|<>|<|>|!|&&|\\|\\||\\?\\:|\\*=|%=|\\+=|\\-=|&=|\\^=|\\b(?:in|instanceof|new|delete|typeof|void)" }, { token : "paren.lparen", regex : "[[({]" }, { token : "paren.rparen", regex : "[\\])}]" }, { token : "text", regex : "\\s+" } ], "comment" : [ { token : "comment", // closing comment regex : "^=end(?:$|\\s.*$)", next : "start" }, { token : "comment", // comment spanning whole line regex : ".+" } ] }; this.normalizeRules(); }; oop.inherits(RubyHighlightRules, TextHighlightRules); exports.RubyHighlightRules = RubyHighlightRules; }); __ace_shadowed__.define('ace/mode/matching_brace_outdent', ['require', 'exports', 'module' , 'ace/range'], function(require, exports, module) { var Range = require("../range").Range; var MatchingBraceOutdent = function() {}; (function() { this.checkOutdent = function(line, input) { if (! 
/^\s+$/.test(line)) return false; return /^\s*\}/.test(input); }; this.autoOutdent = function(doc, row) { var line = doc.getLine(row); var match = line.match(/^(\s*\})/); if (!match) return 0; var column = match[1].length; var openBracePos = doc.findMatchingBracket({row: row, column: column}); if (!openBracePos || openBracePos.row == row) return 0; var indent = this.$getIndent(doc.getLine(openBracePos.row)); doc.replace(new Range(row, 0, row, column-1), indent); }; this.$getIndent = function(line) { return line.match(/^\s*/)[0]; }; }).call(MatchingBraceOutdent.prototype); exports.MatchingBraceOutdent = MatchingBraceOutdent; }); __ace_shadowed__.define('ace/mode/folding/coffee', ['require', 'exports', 'module' , 'ace/lib/oop', 'ace/mode/folding/fold_mode', 'ace/range'], function(require, exports, module) { var oop = require("../../lib/oop"); var BaseFoldMode = require("./fold_mode").FoldMode; var Range = require("../../range").Range; var FoldMode = exports.FoldMode = function() {}; oop.inherits(FoldMode, BaseFoldMode); (function() { this.getFoldWidgetRange = function(session, foldStyle, row) { var range = this.indentationBlock(session, row); if (range) return range; var re = /\S/; var line = session.getLine(row); var startLevel = line.search(re); if (startLevel == -1 || line[startLevel] != "#") return; var startColumn = line.length; var maxRow = session.getLength(); var startRow = row; var endRow = row; while (++row < maxRow) { line = session.getLine(row); var level = line.search(re); if (level == -1) continue; if (line[level] != "#") break; endRow = row; } if (endRow > startRow) { var endColumn = session.getLine(endRow).length; return new Range(startRow, startColumn, endRow, endColumn); } }; this.getFoldWidget = function(session, foldStyle, row) { var line = session.getLine(row); var indent = line.search(/\S/); var next = session.getLine(row + 1); var prev = session.getLine(row - 1); var prevIndent = prev.search(/\S/); var nextIndent = next.search(/\S/); if (indent == -1) { session.foldWidgets[row - 1] = prevIndent != -1 && prevIndent < nextIndent ? "start" : ""; return "";<|fim▁hole|>
} if (prevIndent == -1) { if (indent == nextIndent && line[indent] == "#" && next[indent] == "#") { session.foldWidgets[row - 1] = ""; session.foldWidgets[row + 1] = ""; return "start"; } } else if (prevIndent == indent && line[indent] == "#" && prev[indent] == "#") { if (session.getLine(row - 2).search(/\S/) == -1) { session.foldWidgets[row - 1] = "start"; session.foldWidgets[row + 1] = ""; return ""; } } if (prevIndent != -1 && prevIndent < indent) session.foldWidgets[row - 1] = "start"; else session.foldWidgets[row - 1] = ""; if (indent < nextIndent) return "start"; else return ""; }; }).call(FoldMode.prototype); });<|fim▁end|>
<|file_name|>TestTimeField.py<|end_file_name|><|fim▁begin|># vim: set ff=unix expandtab ts=4 sw=4: from unittest import TestCase, main import numpy as np from CompartmentalSystems.bins.TimeField import TimeField class TestTimeField(TestCase): def setUp(self): self.ar = np.zeros(3)<|fim▁hole|> self.arr[2, 1] = 2 def test_number_of_Ts_entries(self): tf = TimeField(self.arr, 0.1) self.assertEqual(tf.number_of_Ts_entries, 3) if __name__ == "__main__": main()<|fim▁end|>
self.ar[2] = 2 self.arr = np.zeros((3, 2))
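This row shows the prompt/completion split most plainly: the hole removes two statements from setUp. With the completion spliced back in (indentation restored), the fixture reads:

def setUp(self):
    self.ar = np.zeros(3)
    self.ar[2] = 2
    self.arr = np.zeros((3, 2))
    self.arr[2, 1] = 2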
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>""" WSGI config for ncbi project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ncbi.settings") <|fim▁hole|><|fim▁end|>
application = get_wsgi_application()
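The module exposes the WSGI callable a server imports as ncbi.wsgi:application. One way to exercise any WSGI callable without running a server, shown as an illustrative sketch (it assumes the DJANGO_SETTINGS_MODULE set above is resolvable):

from wsgiref.util import setup_testing_defaults

def smoke_test(application):
    # Drive the callable with a synthetic GET / request and capture
    # the status line it reports through start_response.
    environ = {}
    setup_testing_defaults(environ)
    captured = {}

    def start_response(status, headers):
        captured['status'] = status
        captured['headers'] = headers

    body = b''.join(application(environ, start_response))
    return captured['status'], body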
<|file_name|>client_test.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Import python libs from __future__ import absolute_import <|fim▁hole|> # Import Salt Testing Libs from salttesting import TestCase, skipIf from salttesting.mock import ( NO_MOCK, NO_MOCK_REASON, ) from salttesting.helpers import ensure_in_syspath ensure_in_syspath('../../') @skipIf(NO_MOCK, NO_MOCK_REASON) class ClientACLTestCase(TestCase): ''' Unit tests for salt.acl.ClientACL ''' def setUp(self): self.blacklist = { 'users': ['joker', 'penguin'], 'modules': ['cmd.run', 'test.fib'], } def test_user_is_blacklisted(self): ''' test user_is_blacklisted ''' client_acl = acl.PublisherACL(self.blacklist) self.assertTrue(client_acl.user_is_blacklisted('joker')) self.assertTrue(client_acl.user_is_blacklisted('penguin')) self.assertFalse(client_acl.user_is_blacklisted('batman')) self.assertFalse(client_acl.user_is_blacklisted('robin')) def test_cmd_is_blacklisted(self): ''' test cmd_is_blacklisted ''' client_acl = acl.PublisherACL(self.blacklist) self.assertTrue(client_acl.cmd_is_blacklisted('cmd.run')) self.assertTrue(client_acl.cmd_is_blacklisted('test.fib')) self.assertFalse(client_acl.cmd_is_blacklisted('cmd.shell')) self.assertFalse(client_acl.cmd_is_blacklisted('test.versions')) self.assertTrue(client_acl.cmd_is_blacklisted(['cmd.run', 'state.sls'])) self.assertFalse(client_acl.cmd_is_blacklisted(['state.highstate', 'state.sls'])) if __name__ == '__main__': from integration import run_tests run_tests(ClientACLTestCase, needs_daemon=False)<|fim▁end|>
# Import Salt Libs from salt import acl
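The test above pins down the PublisherACL contract: it is constructed from a blacklist dict with 'users' and 'modules' keys and queried per user or per command (including a list of commands). Typical use, following only what the assertions exercise (a sketch, not salt's actual internals):

from salt import acl

blacklist = {
    'users': ['joker', 'penguin'],
    'modules': ['cmd.run', 'test.fib'],
}
publisher_acl = acl.PublisherACL(blacklist)

def may_publish(user, cmd):
    # Mirrors the two checks asserted in the test case above.
    return not (publisher_acl.user_is_blacklisted(user)
                or publisher_acl.cmd_is_blacklisted(cmd))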
<|file_name|>modistile.js<|end_file_name|><|fim▁begin|>define([ 'aeris/util', 'aeris/errors/invalidargumenterror', 'aeris/maps/layers/aeristile' ], function(_, InvalidArgumentError, AerisTile) { /** * Representation of an Aeris Modis layer. * * @constructor * @class aeris.maps.layers.ModisTile * @extends aeris.maps.layers.AerisTile */ var ModisTile = function(opt_attrs, opt_options) { var options = _.extend({ period: 14 }, opt_options); var attrs = _.extend({ autoUpdateInterval: AerisTile.updateIntervals.MODIS, /** * Hash of available tileType codes by period * Used to dynamically create layer's tileType * * @attribute modisPeriodTileTypes * @type {Object.<number, string>} */ modisPeriodTileTypes: { /* eg 1: "modis_tileType_1day", 3: "modis_tileType_3day" */ } }, opt_attrs); // Set initial tileType _.extend(attrs, { tileType: attrs.modisPeriodTileTypes[options.period] }); AerisTile.call(this, attrs, opt_options); this.setModisPeriod(options.period); }; // Inherit from AerisTile _.inherits(ModisTile, AerisTile); /** * @param {number} period * @throws {aeris.errors.InvalidArgumentError} If the layer does not support the given MODIS period. */ ModisTile.prototype.setModisPeriod = function(period) { var validPeriods = _.keys(this.get('modisPeriodTileTypes'));<|fim▁hole|> period = parseInt(period); // Validate period if (!period || period < 1) { throw new InvalidArgumentError('Invalid MODIS period: period must be a positive integer'); } if (!(period in this.get('modisPeriodTileTypes'))) { throw new InvalidArgumentError('Invalid MODIS periods: available periods are: ' + validPeriods.join(',')); } // Set new tile type this.set('tileType', this.get('modisPeriodTileTypes')[period], { validate: true }); }; return ModisTile; });<|fim▁end|>
<|file_name|>maybeArrayLike.js<|end_file_name|><|fim▁begin|>var _Array$isArray = require("../core-js/array/is-array"); var arrayLikeToArray = require("./arrayLikeToArray"); function _maybeArrayLike(next, arr, i) { if (arr && !_Array$isArray(arr) && typeof arr.length === "number") { var len = arr.length; return arrayLikeToArray(arr, i !== void 0 && i < len ? i : len); }<|fim▁hole|> return next(arr, i); } module.exports = _maybeArrayLike;<|fim▁end|>
<|file_name|>test_iddgaps.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 Santosh Philip # ======================================================================= # Distributed under the MIT License. # (See accompanying file LICENSE or copy at # http://opensource.org/licenses/MIT) # ======================================================================= """pytest for iddgaps.py""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import eppy.iddgaps as iddgaps def test_cleaniddfield(): """pytest for cleaniddfield""" data = (( { 'field': ['Water Supply Storage Tank Name'], 'Field': ['Water Supply Storage Tank Name'], 'object-list': ['WaterStorageTankNames'], 'type': ['object-list'] }, { 'field': ['Water Supply Storage Tank Name'], 'object-list': ['WaterStorageTankNames'], 'type': ['object-list'] } ), #field, newfield ) for field, newfield in data: result = iddgaps.cleaniddfield(field)<|fim▁hole|><|fim▁end|>
assert result == newfield
<|file_name|>db_snapshot.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2012, Suryandaru Triandana <[email protected]> // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package leveldb import ( "container/list" "fmt" "runtime" "sync" "sync/atomic" "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/iterator" "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/opt" "camlistore.org/third_party/github.com/syndtr/goleveldb/leveldb/util" ) type snapshotElement struct { seq uint64 ref int e *list.Element } // Acquires a snapshot, based on latest sequence. func (db *DB) acquireSnapshot() *snapshotElement { db.snapsMu.Lock() defer db.snapsMu.Unlock() seq := db.getSeq() if e := db.snapsList.Back(); e != nil { se := e.Value.(*snapshotElement) if se.seq == seq { se.ref++ return se } else if seq < se.seq { panic("leveldb: sequence number is not increasing") } } se := &snapshotElement{seq: seq, ref: 1} se.e = db.snapsList.PushBack(se) return se } // Releases given snapshot element. func (db *DB) releaseSnapshot(se *snapshotElement) { db.snapsMu.Lock() defer db.snapsMu.Unlock() se.ref-- if se.ref == 0 { db.snapsList.Remove(se.e) se.e = nil } else if se.ref < 0 { panic("leveldb: Snapshot: negative element reference") } } // Gets minimum sequence that not being snapshoted. func (db *DB) minSeq() uint64 { db.snapsMu.Lock() defer db.snapsMu.Unlock() if e := db.snapsList.Front(); e != nil { return e.Value.(*snapshotElement).seq } return db.getSeq() } // Snapshot is a DB snapshot. type Snapshot struct { db *DB elem *snapshotElement mu sync.RWMutex released bool } // Creates new snapshot object. func (db *DB) newSnapshot() *Snapshot { snap := &Snapshot{ db: db, elem: db.acquireSnapshot(), } atomic.AddInt32(&db.aliveSnaps, 1) runtime.SetFinalizer(snap, (*Snapshot).Release) return snap } func (snap *Snapshot) String() string { return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq) } // Get gets the value for the given key. It returns ErrNotFound if // the DB does not contains the key. // // The caller should not modify the contents of the returned slice, but // it is safe to modify the contents of the argument after Get returns. func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) { err = snap.db.ok() if err != nil { return } snap.mu.RLock() defer snap.mu.RUnlock() if snap.released { err = ErrSnapshotReleased return } return snap.db.get(key, snap.elem.seq, ro) } // Has returns true if the DB does contains the given key. // // It is safe to modify the contents of the argument after Get returns. func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) { err = snap.db.ok() if err != nil { return } snap.mu.RLock() defer snap.mu.RUnlock() if snap.released { err = ErrSnapshotReleased return } return snap.db.has(key, snap.elem.seq, ro) } // NewIterator returns an iterator for the snapshot of the uderlying DB. // The returned iterator is not goroutine-safe, but it is safe to use // multiple iterators concurrently, with each in a dedicated goroutine. // It is also safe to use an iterator concurrently with modifying its // underlying DB. The resultant key/value pairs are guaranteed to be // consistent. // // Slice allows slicing the iterator to only contains keys in the given // range. A nil Range.Start is treated as a key before all keys in the // DB. And a nil Range.Limit is treated as a key after all keys in // the DB. 
//<|fim▁hole|>// The iterator must be released after use, by calling the Release method. // Releasing the snapshot doesn't mean releasing the iterator too; the // iterator is still valid until released. // // Also read the Iterator documentation of the leveldb/iterator package. func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { if err := snap.db.ok(); err != nil { return iterator.NewEmptyIterator(err) } snap.mu.Lock() defer snap.mu.Unlock() if snap.released { return iterator.NewEmptyIterator(ErrSnapshotReleased) } // Since the iterator already holds a version ref, it doesn't need to // hold a snapshot ref. return snap.db.newIterator(snap.elem.seq, slice, ro) } // Release releases the snapshot. This will not release any returned // iterators; the iterators are still valid until released or the // underlying DB is closed. // // Other methods should not be called after the snapshot has been released. func (snap *Snapshot) Release() { snap.mu.Lock() defer snap.mu.Unlock() if !snap.released { // Clear the finalizer. runtime.SetFinalizer(snap, nil) snap.released = true snap.db.releaseSnapshot(snap.elem) atomic.AddInt32(&snap.db.aliveSnaps, -1) snap.db = nil snap.elem = nil } }<|fim▁end|>
<|file_name|>testcli.py<|end_file_name|><|fim▁begin|>import sphivedbcli import time import sys def printResultSet( rs ): print "row.count %d" % ( rs.getRowCount() ) columnCount = rs.getColumnCount() hdrs = "" for i in range( columnCount ): hdrs = hdrs + ( "\t%s(%s)" % ( rs.getName( i ), rs.getType( i ) ) ) print hdrs for i in range( rs.getRowCount() ): rs.moveTo( i ) row = "" for j in range( columnCount ): row = row + ( "\t[%s]" % ( rs.getString( j ) ) ) print row if __name__ == "__main__": if len( sys.argv ) != 2: print "Usage: %s <config file>" % ( sys.argv[0] ) print "\tpython %s ../../sphivedb/sphivedbcli.ini" % ( sys.argv[0] )<|fim▁hole|> configFile = sys.argv[1] cli = sphivedbcli.SPHiveDBClient() cli.init( configFile ) try: resp = cli.execute( 0, "foobar", "addrbook", \ [ "insert into addrbook ( addr ) values ( \"%d\" )" % ( time.time() ), \ "select * from addrbook" ] ) if 0 == resp.getErrorCode(): rsCount = resp.getResultCount() for i in range( rsCount ): rs = resp.getResultSet( i ) printResultSet( rs ) else: print "%d: %s" % ( resp.getErrdataCode(), resp.getErrdataMsg() ) except Exception, e: print e<|fim▁end|>
sys.exit( -1 )
<|file_name|>_ext_type.py<|end_file_name|><|fim▁begin|># $File: _ext_type.py # $Date: Wed Feb 22 15:04:06 2012 +0800 #<|fim▁hole|># Contributors to this file: # Kai Jia <[email protected]> # # This file is part of pynojo # # pynojo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pynojo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pynojo. If not, see <http://www.gnu.org/licenses/>. # """Extra SQLAlchemy ORM types""" __all__ = ['JSONEncodeDict'] import cjson from sqlalchemy.types import TypeDecorator, String from sqlalchemy.ext.mutable import Mutable from pynojo.exc import PynojoRuntimeError class JSONEncodeDict(TypeDecorator): """Represents a mutable python *dict* as a json-encoded string.""" # pylint: disable=W0223 impl = String def process_bind_param(self, value, dialect): if value is not None: value = cjson.encode(value) if len(value) > self.length: raise PynojoRuntimeError(_( '{class_name}: encoded string too long', class_name = self.__class__.__name__)) return value def process_result_value(self, value, dialect): if value is not None: value = cjson.decode(value) return value class _JSONEncodeDictMutabilize(Mutable, dict): @classmethod def coerce(cls, key, value): if not isinstance(value, _JSONEncodeDictMutabilize): if isinstance(value, dict): return _JSONEncodeDictMutabilize(value) return Mutable.coerce(key, value) else: return value def __setitem__(self, key, value): dict.__setitem__(self, key, value) self.changed() def __delitem__(self, key): dict.__delitem__(self, key) self.changed() _JSONEncodeDictMutabilize.associate_with(JSONEncodeDict)<|fim▁end|>
# Copyright (C) 2012 the pynojo development team <see AUTHORS file> #
<|file_name|>cabi_x86.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use syntax::abi::{OsWin32, OsMacos}; use lib::llvm::*; use super::cabi::*; use super::common::*; use super::machine::*; use middle::trans::type_::Type; pub fn compute_abi_info(ccx: &CrateContext, atys: &[Type], rty: Type, ret_def: bool) -> FnType { let mut arg_tys = Vec::new(); let ret_ty; if !ret_def { ret_ty = ArgType::direct(Type::void(ccx), None, None, None); } else if rty.kind() == Struct { // Returning a structure. Most often, this will use // a hidden first argument. On some platforms, though, // small structs are returned as integers. // // Some links: // http://www.angelcode.com/dev/callconv/callconv.html // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp enum Strategy { RetValue(Type), RetPointer } let strategy = match ccx.sess().targ_cfg.os { OsWin32 | OsMacos => { match llsize_of_alloc(ccx, rty) { 1 => RetValue(Type::i8(ccx)), 2 => RetValue(Type::i16(ccx)), 4 => RetValue(Type::i32(ccx)), 8 => RetValue(Type::i64(ccx)), _ => RetPointer } } _ => { RetPointer } }; match strategy {<|fim▁hole|> ret_ty = ArgType::direct(rty, Some(t), None, None); } RetPointer => { ret_ty = ArgType::indirect(rty, Some(StructRetAttribute)); } } } else { ret_ty = ArgType::direct(rty, None, None, None); } for &t in atys.iter() { let ty = match t.kind() { Struct => { let size = llsize_of_alloc(ccx, t); if size == 0 { ArgType::ignore(t) } else { ArgType::indirect(t, Some(ByValAttribute)) } } _ => ArgType::direct(t, None, None, None), }; arg_tys.push(ty); } return FnType { arg_tys: arg_tys, ret_ty: ret_ty, }; }<|fim▁end|>
RetValue(t) => {
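
The Rust sample above encodes the x86 rule that a small struct is returned in a register on Win32 and macOS and through a hidden out-pointer everywhere else. A rough Python restatement of that dispatch (the OS names are illustrative stand-ins for the compiler's own enums, not a real API):

def ret_strategy(os_name, size_in_bytes):
    # Mirrors compute_abi_info: 1/2/4/8-byte structs come back as
    # i8/i16/i32/i64 return values; anything else goes via RetPointer (sret).
    if os_name in ("win32", "macos") and size_in_bytes in (1, 2, 4, 8):
        return "RetValue(i%d)" % (size_in_bytes * 8)
    return "RetPointer"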
<|file_name|>lace.js<|end_file_name|><|fim▁begin|>import { $, util } from './util-node' import Taglet from './taglet' var lace, version = '1.0.0', defaults = { opts: {} }, warehouse = { singleton: null, compiled_dom: null, laces: { global: null }, taglets: {} } ; class Lace { constructor(name) { this.name = name; }<|fim▁hole|> /** * * @param name * @param def * @param global, true to make it available only for this lace * @returns {*} */ annotation(name, def, global = false) { /*if (typeof def !== Type.UNDEFINED) { this.definition('annotation', name, def); } return this.instance('annotation', name);*/ } taglet(name, def, global = false) { /*if (typeof def !== Type.UNDEFINED) { this.definition('taglet', name, def); } return this.instance('taglet', name);*/ } compile() { } render(template, data) { var $tmpl = $(template); console.log($tmpl); } definition(type, name, def) { return this.__lace__[type]['definitions'][name] = def || this.__lace__[type]['definitions'][name]; } instance(type, name, inst) { return this.__lace__[type]['instances'][name] = inst || this.__lace__[type]['instances'][name]; } } //TODO: should I declare in prototype? Lace.prototype.__lace__ = { annotation: { definitions: {}, instances: {} }, taglet: { definitions: {}, instances: {} } }; Lace.init = function (name) { return warehouse.laces[name] = warehouse.laces[name] || new Lace(name); }; Lace.parse = function(template) { }; /** * MODULE function for lace * @param name * @returns {Function|*} */ lace = function(name) { name = name || 'global'; return Lace.init(name); }; export default lace;<|fim▁end|>
<|file_name|>tydecode.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Type decoding // tjc note: Would be great to have a `match check` macro equivalent // for some of these #![allow(non_camel_case_types)] use middle::subst; use middle::subst::VecPerParamSpace; use middle::ty; use std::rc::Rc; use std::str; use std::string::String; use std::uint; use syntax::abi; use syntax::ast; use syntax::ast::*; use syntax::parse::token; // Compact string representation for ty::t values. API ty_str & // parse_from_str. Extra parameters are for converting to/from def_ids in the // data buffer. Whatever format you choose should not contain pipe characters. // Def id conversion: when we encounter def-ids, they have to be translated. // For example, the crate number must be converted from the crate number used<|fim▁hole|>// in the library we are reading from into the local crate numbers in use // here. To perform this translation, the type decoder is supplied with a // conversion function of type `conv_did`. // // Sometimes, particularly when inlining, the correct translation of the // def-id will depend on where it originated from. Therefore, the conversion // function is given an indicator of the source of the def-id. See // astencode.rs for more information. pub enum DefIdSource { // Identifies a struct, trait, enum, etc. NominalType, // Identifies a type alias (`type X = ...`). TypeWithId, // Identifies a type parameter (`fn foo<X>() { ... }`). TypeParameter, // Identifies a region parameter (`fn foo<'X>() { ... }`). 
RegionParameter, } pub type conv_did<'a> = |source: DefIdSource, ast::DefId|: 'a -> ast::DefId; pub struct PState<'a> { data: &'a [u8], krate: ast::CrateNum, pos: uint, tcx: &'a ty::ctxt } fn peek(st: &PState) -> char { st.data[st.pos] as char } fn next(st: &mut PState) -> char { let ch = st.data[st.pos] as char; st.pos = st.pos + 1u; return ch; } fn next_byte(st: &mut PState) -> u8 { let b = st.data[st.pos]; st.pos = st.pos + 1u; return b; } fn scan<R>(st: &mut PState, is_last: |char| -> bool, op: |&[u8]| -> R) -> R { let start_pos = st.pos; debug!("scan: '{}' (start)", st.data[st.pos] as char); while !is_last(st.data[st.pos] as char) { st.pos += 1; debug!("scan: '{}'", st.data[st.pos] as char); } let end_pos = st.pos; st.pos += 1; return op(st.data.slice(start_pos, end_pos)); } pub fn parse_ident(st: &mut PState, last: char) -> ast::Ident { fn is_last(b: char, c: char) -> bool { return c == b; } return parse_ident_(st, |a| is_last(last, a) ); } fn parse_ident_(st: &mut PState, is_last: |char| -> bool) -> ast::Ident { scan(st, is_last, |bytes| { token::str_to_ident(str::from_utf8(bytes).unwrap()) }) } pub fn parse_state_from_data<'a>(data: &'a [u8], crate_num: ast::CrateNum, pos: uint, tcx: &'a ty::ctxt) -> PState<'a> { PState { data: data, krate: crate_num, pos: pos, tcx: tcx } } fn data_log_string(data: &[u8], pos: uint) -> String { let mut buf = String::new(); buf.push_str("<<"); for i in range(pos, data.len()) { let c = data[i]; if c > 0x20 && c <= 0x7F { buf.push_char(c as char); } else { buf.push_char('.'); } } buf.push_str(">>"); buf } pub fn parse_ty_data(data: &[u8], crate_num: ast::CrateNum, pos: uint, tcx: &ty::ctxt, conv: conv_did) -> ty::t { debug!("parse_ty_data {}", data_log_string(data, pos)); let mut st = parse_state_from_data(data, crate_num, pos, tcx); parse_ty(&mut st, conv) } pub fn parse_bare_fn_ty_data(data: &[u8], crate_num: ast::CrateNum, pos: uint, tcx: &ty::ctxt, conv: conv_did) -> ty::BareFnTy { debug!("parse_bare_fn_ty_data {}", data_log_string(data, pos)); let mut st = parse_state_from_data(data, crate_num, pos, tcx); parse_bare_fn_ty(&mut st, conv) } pub fn parse_trait_ref_data(data: &[u8], crate_num: ast::CrateNum, pos: uint, tcx: &ty::ctxt, conv: conv_did) -> ty::TraitRef { debug!("parse_trait_ref_data {}", data_log_string(data, pos)); let mut st = parse_state_from_data(data, crate_num, pos, tcx); parse_trait_ref(&mut st, conv) } pub fn parse_substs_data(data: &[u8], crate_num: ast::CrateNum, pos: uint, tcx: &ty::ctxt, conv: conv_did) -> subst::Substs { debug!("parse_substs_data {}", data_log_string(data, pos)); let mut st = parse_state_from_data(data, crate_num, pos, tcx); parse_substs(&mut st, conv) } fn parse_size(st: &mut PState) -> Option<uint> { assert_eq!(next(st), '/'); if peek(st) == '|' { assert_eq!(next(st), '|'); None } else { let n = parse_uint(st); assert_eq!(next(st), '|'); Some(n) } } fn parse_trait_store(st: &mut PState, conv: conv_did) -> ty::TraitStore { match next(st) { '~' => ty::UniqTraitStore, '&' => ty::RegionTraitStore(parse_region(st, conv), parse_mutability(st)), c => { st.tcx.sess.bug(format!("parse_trait_store(): bad input '{}'", c).as_slice()) } } } fn parse_vec_per_param_space<T>(st: &mut PState, f: |&mut PState| -> T) -> VecPerParamSpace<T> { let mut r = VecPerParamSpace::empty(); for &space in subst::ParamSpace::all().iter() { assert_eq!(next(st), '['); while peek(st) != ']' { r.push(space, f(st)); } assert_eq!(next(st), ']'); } r } fn parse_substs(st: &mut PState, conv: conv_did) -> subst::Substs { let regions = 
parse_region_substs(st, |x,y| conv(x,y)); let types = parse_vec_per_param_space(st, |st| parse_ty(st, |x,y| conv(x,y))); return subst::Substs { types: types, regions: regions }; } fn parse_region_substs(st: &mut PState, conv: conv_did) -> subst::RegionSubsts { match next(st) { 'e' => subst::ErasedRegions, 'n' => { subst::NonerasedRegions( parse_vec_per_param_space( st, |st| parse_region(st, |x,y| conv(x,y)))) } _ => fail!("parse_bound_region: bad input") } } fn parse_bound_region(st: &mut PState, conv: conv_did) -> ty::BoundRegion { match next(st) { 'a' => { let id = parse_uint(st); assert_eq!(next(st), '|'); ty::BrAnon(id) } '[' => { let def = parse_def(st, RegionParameter, |x,y| conv(x,y)); let ident = token::str_to_ident(parse_str(st, ']').as_slice()); ty::BrNamed(def, ident.name) } 'f' => { let id = parse_uint(st); assert_eq!(next(st), '|'); ty::BrFresh(id) } _ => fail!("parse_bound_region: bad input") } } fn parse_region(st: &mut PState, conv: conv_did) -> ty::Region { match next(st) { 'b' => { assert_eq!(next(st), '['); let id = parse_uint(st) as ast::NodeId; assert_eq!(next(st), '|'); let br = parse_bound_region(st, |x,y| conv(x,y)); assert_eq!(next(st), ']'); ty::ReLateBound(id, br) } 'B' => { assert_eq!(next(st), '['); let node_id = parse_uint(st) as ast::NodeId; assert_eq!(next(st), '|'); let space = parse_param_space(st); assert_eq!(next(st), '|'); let index = parse_uint(st); assert_eq!(next(st), '|'); let nm = token::str_to_ident(parse_str(st, ']').as_slice()); ty::ReEarlyBound(node_id, space, index, nm.name) } 'f' => { assert_eq!(next(st), '['); let id = parse_uint(st) as ast::NodeId; assert_eq!(next(st), '|'); let br = parse_bound_region(st, |x,y| conv(x,y)); assert_eq!(next(st), ']'); ty::ReFree(ty::FreeRegion {scope_id: id, bound_region: br}) } 's' => { let id = parse_uint(st) as ast::NodeId; assert_eq!(next(st), '|'); ty::ReScope(id) } 't' => { ty::ReStatic } 'e' => { ty::ReStatic } _ => fail!("parse_region: bad input") } } fn parse_opt<T>(st: &mut PState, f: |&mut PState| -> T) -> Option<T> { match next(st) { 'n' => None, 's' => Some(f(st)), _ => fail!("parse_opt: bad input") } } fn parse_str(st: &mut PState, term: char) -> String { let mut result = String::new(); while peek(st) != term { unsafe { result.push_bytes([next_byte(st)]) } } next(st); result } fn parse_trait_ref(st: &mut PState, conv: conv_did) -> ty::TraitRef { let def = parse_def(st, NominalType, |x,y| conv(x,y)); let substs = parse_substs(st, |x,y| conv(x,y)); ty::TraitRef {def_id: def, substs: substs} } fn parse_ty(st: &mut PState, conv: conv_did) -> ty::t { match next(st) { 'n' => return ty::mk_nil(), 'z' => return ty::mk_bot(), 'b' => return ty::mk_bool(), 'i' => return ty::mk_int(), 'u' => return ty::mk_uint(), 'M' => { match next(st) { 'b' => return ty::mk_mach_uint(ast::TyU8), 'w' => return ty::mk_mach_uint(ast::TyU16), 'l' => return ty::mk_mach_uint(ast::TyU32), 'd' => return ty::mk_mach_uint(ast::TyU64), 'B' => return ty::mk_mach_int(ast::TyI8), 'W' => return ty::mk_mach_int(ast::TyI16), 'L' => return ty::mk_mach_int(ast::TyI32), 'D' => return ty::mk_mach_int(ast::TyI64), 'f' => return ty::mk_mach_float(ast::TyF32), 'F' => return ty::mk_mach_float(ast::TyF64), _ => fail!("parse_ty: bad numeric type") } } 'c' => return ty::mk_char(), 't' => { assert_eq!(next(st), '['); let def = parse_def(st, NominalType, |x,y| conv(x,y)); let substs = parse_substs(st, |x,y| conv(x,y)); assert_eq!(next(st), ']'); return ty::mk_enum(st.tcx, def, substs); } 'x' => { assert_eq!(next(st), '['); let def = parse_def(st, 
NominalType, |x,y| conv(x,y)); let substs = parse_substs(st, |x,y| conv(x,y)); let bounds = parse_bounds(st, |x,y| conv(x,y)); assert_eq!(next(st), ']'); return ty::mk_trait(st.tcx, def, substs, bounds.builtin_bounds); } 'p' => { let did = parse_def(st, TypeParameter, |x,y| conv(x,y)); debug!("parsed ty_param: did={:?}", did); let index = parse_uint(st); assert_eq!(next(st), '|'); let space = parse_param_space(st); assert_eq!(next(st), '|'); return ty::mk_param(st.tcx, space, index, did); } '@' => return ty::mk_box(st.tcx, parse_ty(st, |x,y| conv(x,y))), '~' => return ty::mk_uniq(st.tcx, parse_ty(st, |x,y| conv(x,y))), '*' => return ty::mk_ptr(st.tcx, parse_mt(st, |x,y| conv(x,y))), '&' => { let r = parse_region(st, |x,y| conv(x,y)); let mt = parse_mt(st, |x,y| conv(x,y)); return ty::mk_rptr(st.tcx, r, mt); } 'V' => { let mt = parse_mt(st, |x,y| conv(x,y)); let sz = parse_size(st); return ty::mk_vec(st.tcx, mt, sz); } 'v' => { return ty::mk_str(st.tcx); } 'T' => { assert_eq!(next(st), '['); let mut params = Vec::new(); while peek(st) != ']' { params.push(parse_ty(st, |x,y| conv(x,y))); } st.pos = st.pos + 1u; return ty::mk_tup(st.tcx, params); } 'f' => { return ty::mk_closure(st.tcx, parse_closure_ty(st, |x,y| conv(x,y))); } 'F' => { return ty::mk_bare_fn(st.tcx, parse_bare_fn_ty(st, |x,y| conv(x,y))); } '#' => { let pos = parse_hex(st); assert_eq!(next(st), ':'); let len = parse_hex(st); assert_eq!(next(st), '#'); let key = ty::creader_cache_key {cnum: st.krate, pos: pos, len: len }; match st.tcx.rcache.borrow().find_copy(&key) { Some(tt) => return tt, None => {} } let mut ps = PState { pos: pos, .. *st }; let tt = parse_ty(&mut ps, |x,y| conv(x,y)); st.tcx.rcache.borrow_mut().insert(key, tt); return tt; } '"' => { let _ = parse_def(st, TypeWithId, |x,y| conv(x,y)); let inner = parse_ty(st, |x,y| conv(x,y)); inner } 'a' => { assert_eq!(next(st), '['); let did = parse_def(st, NominalType, |x,y| conv(x,y)); let substs = parse_substs(st, |x,y| conv(x,y)); assert_eq!(next(st), ']'); return ty::mk_struct(st.tcx, did, substs); } 'e' => { return ty::mk_err(); } c => { fail!("unexpected char in type string: {}", c);} } } fn parse_mutability(st: &mut PState) -> ast::Mutability { match peek(st) { 'm' => { next(st); ast::MutMutable } _ => { ast::MutImmutable } } } fn parse_mt(st: &mut PState, conv: conv_did) -> ty::mt { let m = parse_mutability(st); ty::mt { ty: parse_ty(st, |x,y| conv(x,y)), mutbl: m } } fn parse_def(st: &mut PState, source: DefIdSource, conv: conv_did) -> ast::DefId { return conv(source, scan(st, |c| { c == '|' }, parse_def_id)); } fn parse_uint(st: &mut PState) -> uint { let mut n = 0; loop { let cur = peek(st); if cur < '0' || cur > '9' { return n; } st.pos = st.pos + 1u; n *= 10; n += (cur as uint) - ('0' as uint); }; } fn parse_param_space(st: &mut PState) -> subst::ParamSpace { subst::ParamSpace::from_uint(parse_uint(st)) } fn parse_hex(st: &mut PState) -> uint { let mut n = 0u; loop { let cur = peek(st); if (cur < '0' || cur > '9') && (cur < 'a' || cur > 'f') { return n; } st.pos = st.pos + 1u; n *= 16u; if '0' <= cur && cur <= '9' { n += (cur as uint) - ('0' as uint); } else { n += 10u + (cur as uint) - ('a' as uint); } }; } fn parse_fn_style(c: char) -> FnStyle { match c { 'u' => UnsafeFn, 'n' => NormalFn, _ => fail!("parse_fn_style: bad fn_style {}", c) } } fn parse_abi_set(st: &mut PState) -> abi::Abi { assert_eq!(next(st), '['); scan(st, |c| c == ']', |bytes| { let abi_str = str::from_utf8(bytes).unwrap(); abi::lookup(abi_str.as_slice()).expect(abi_str) }) } fn 
parse_onceness(c: char) -> ast::Onceness { match c { 'o' => ast::Once, 'm' => ast::Many, _ => fail!("parse_onceness: bad onceness") } } fn parse_closure_ty(st: &mut PState, conv: conv_did) -> ty::ClosureTy { let fn_style = parse_fn_style(next(st)); let onceness = parse_onceness(next(st)); let store = parse_trait_store(st, |x,y| conv(x,y)); let bounds = parse_bounds(st, |x,y| conv(x,y)); let sig = parse_sig(st, |x,y| conv(x,y)); ty::ClosureTy { fn_style: fn_style, onceness: onceness, store: store, bounds: bounds.builtin_bounds, sig: sig } } fn parse_bare_fn_ty(st: &mut PState, conv: conv_did) -> ty::BareFnTy { let fn_style = parse_fn_style(next(st)); let abi = parse_abi_set(st); let sig = parse_sig(st, |x,y| conv(x,y)); ty::BareFnTy { fn_style: fn_style, abi: abi, sig: sig } } fn parse_sig(st: &mut PState, conv: conv_did) -> ty::FnSig { assert_eq!(next(st), '['); let id = parse_uint(st) as ast::NodeId; assert_eq!(next(st), '|'); let mut inputs = Vec::new(); while peek(st) != ']' { inputs.push(parse_ty(st, |x,y| conv(x,y))); } st.pos += 1u; // eat the ']' let variadic = match next(st) { 'V' => true, 'N' => false, r => fail!(format!("bad variadic: {}", r)), }; let ret_ty = parse_ty(st, |x,y| conv(x,y)); ty::FnSig {binder_id: id, inputs: inputs, output: ret_ty, variadic: variadic} } // Rust metadata parsing pub fn parse_def_id(buf: &[u8]) -> ast::DefId { let mut colon_idx = 0u; let len = buf.len(); while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1u; } if colon_idx == len { error!("didn't find ':' when parsing def id"); fail!(); } let crate_part = buf.slice(0u, colon_idx); let def_part = buf.slice(colon_idx + 1u, len); let crate_num = match uint::parse_bytes(crate_part, 10u) { Some(cn) => cn as ast::CrateNum, None => fail!("internal error: parse_def_id: crate number expected, but found {:?}", crate_part) }; let def_num = match uint::parse_bytes(def_part, 10u) { Some(dn) => dn as ast::NodeId, None => fail!("internal error: parse_def_id: id expected, but found {:?}", def_part) }; ast::DefId { krate: crate_num, node: def_num } } pub fn parse_type_param_def_data(data: &[u8], start: uint, crate_num: ast::CrateNum, tcx: &ty::ctxt, conv: conv_did) -> ty::TypeParameterDef { let mut st = parse_state_from_data(data, crate_num, start, tcx); parse_type_param_def(&mut st, conv) } fn parse_type_param_def(st: &mut PState, conv: conv_did) -> ty::TypeParameterDef { let ident = parse_ident(st, ':'); let def_id = parse_def(st, NominalType, |x,y| conv(x,y)); let space = parse_param_space(st); assert_eq!(next(st), '|'); let index = parse_uint(st); assert_eq!(next(st), '|'); let bounds = Rc::new(parse_bounds(st, |x,y| conv(x,y))); let default = parse_opt(st, |st| parse_ty(st, |x,y| conv(x,y))); ty::TypeParameterDef { ident: ident, def_id: def_id, space: space, index: index, bounds: bounds, default: default } } fn parse_bounds(st: &mut PState, conv: conv_did) -> ty::ParamBounds { let mut param_bounds = ty::ParamBounds { builtin_bounds: ty::empty_builtin_bounds(), trait_bounds: Vec::new() }; loop { match next(st) { 'S' => { param_bounds.builtin_bounds.add(ty::BoundSend); } 'O' => { param_bounds.builtin_bounds.add(ty::BoundStatic); } 'Z' => { param_bounds.builtin_bounds.add(ty::BoundSized); } 'P' => { param_bounds.builtin_bounds.add(ty::BoundCopy); } 'T' => { param_bounds.builtin_bounds.add(ty::BoundShare); } 'I' => { param_bounds.trait_bounds.push(Rc::new(parse_trait_ref(st, |x,y| conv(x,y)))); } '.' => { return param_bounds; } c => { fail!("parse_bounds: bad bounds ('{}')", c) } } } }<|fim▁end|>
<|file_name|>IBaseState.ts<|end_file_name|><|fim▁begin|>/** * https://docs.aws.amazon.com/step-functions/latest/dg/awl-ref-states.html */ interface IBaseState { /**<|fim▁hole|> Type: string /** * The name of the next state that will be run when the current state finishes. * Some state types, such as Choice, allow multiple transition states. */ Next?: string /** * Designates this state as a terminal state (it ends the execution) if set to true. * There can be any number of terminal states per state machine. Only one of Next or End can * be used in a state. Some state types, such as Choice, do not support or use the End field. */ End?: boolean /** * Holds a human-readable description of the state. [Optional] */ Comment?: string /** * A Path that selects a portion of the state's input to be passed to the state's task for * processing. If omitted, it has the value $ which designates the entire input. * (See Filters). [Optional] */ InputPath?: string /** * A Path that selects a portion of the state's input to be passed to the state's output. * If omitted, it has the value $ which designates the entire input. (See Filters.) [Optional] */ OutputPath?: string } export default IBaseState<|fim▁end|>
* The state's type. Can be any of the values listed in State Types. [Required] */
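
IBaseState mirrors the fields shared by Amazon States Language states. A hypothetical pair of states exercising only the documented fields (the state names and paths are invented for illustration):

task_state = {
    "Type": "Task",          # required; any of the documented state types
    "Next": "NotifyUser",    # state to run when this one finishes
    "Comment": "Process one order",
    "InputPath": "$.order",  # defaults to $, the entire input
}
final_state = {
    "Type": "Pass",
    "End": True,             # terminal state; a state uses Next or End, not both
}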
<|file_name|>regions-trait-object-1.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // This is a regression test for something that only came up while // attempting to bootstrap libsyntax; it is adapted from // `syntax::ext::tt::generic_extension`. pub struct E<'a> { pub f: &'a u8, } impl<'b> E<'b> { pub fn m(&self) -> &'b u8 { self.f } } pub struct P<'c> { pub g: &'c u8, } pub trait M { fn n(&self) -> u8; } impl<'d> M for P<'d> { fn n(&self) -> u8 { *self.g } } <|fim▁hole|> loop { let p = P { g: x.m() }; return Box::new(p) as Box<M+'e>; } } fn main() { let w = E { f: &10 }; let o = extension(&w); assert_eq!(o.n(), 10); }<|fim▁end|>
fn extension<'e>(x: &'e E<'e>) -> Box<M+'e> {
<|file_name|>EmailSetupGUI.java<|end_file_name|><|fim▁begin|>/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package com.github.sunnybat.paxchecker.setup.email; import com.github.sunnybat.commoncode.email.EmailAddress; import com.github.sunnybat.commoncode.email.account.EmailAccount; import com.github.sunnybat.commoncode.preferences.PreferenceHandler; import java.awt.event.ActionEvent; import java.awt.event.ActionListener; import java.awt.event.KeyAdapter; import java.awt.event.KeyEvent; import java.awt.event.WindowAdapter; import java.awt.event.WindowEvent; import java.util.ArrayList; import java.util.List; import javax.swing.BoxLayout; import javax.swing.ButtonGroup; import javax.swing.DefaultComboBoxModel; import javax.swing.GroupLayout; import javax.swing.JButton; import javax.swing.JComboBox; import javax.swing.JLabel; import javax.swing.JOptionPane; import javax.swing.JPanel; import javax.swing.JRadioButton; import javax.swing.JScrollPane; import javax.swing.JTabbedPane; import javax.swing.JTable; import javax.swing.JTextField; import javax.swing.LayoutStyle; import javax.swing.WindowConstants; import javax.swing.table.DefaultTableModel; /** * * @author SunnyBat */ public class EmailSetupGUI extends javax.swing.JFrame { private AuthenticationCallback myCallback = new AuthenticationCallback(); private AuthGmail authGmail = new AuthGmail(myCallback); private AuthSMTP authSmtp = new AuthSMTP(myCallback); private PreferenceHandler prefs; private EmailAccount finalizedEmailAccount; private boolean disableEmail = false; private List<EmailAddress> savedEmailAddresses; private boolean savedIsGmail; /** * Creates new form EmailUIWrapper * * @param prefs The Preferences to save email configuration settings to and load from */ public EmailSetupGUI(PreferenceHandler prefs) { this.prefs = prefs; initComponents(); customComponents(); } private void customComponents() { String smtpAddress = prefs.getStringPreference("EMAIL"); String emailString = prefs.getStringPreference("CELLNUM"); String emailType = prefs.getStringPreference("EMAILTYPE"); // TODO: we need to initialize everything here, including Send To // addresses and the EmailAccount we're using if (emailType != null && emailType.equalsIgnoreCase("SMTP")) { JRBSMTP.setSelected(true); setAuthPanel(authSmtp); savedIsGmail = false;<|fim▁hole|> } else { System.out.println("smtpIsNull"); } authSmtp.recordCurrentFields(); } else { JRBGmail.setSelected(true); setAuthPanel(authGmail); savedIsGmail = true; if (emailType != null) { // Assumed to be Gmail authGmail.authenticate(); } authGmail.recordCurrentFields(); } if (emailString != null) { List<EmailAddress> addresses = EmailAddress.convertToList(emailString); for (EmailAddress address : addresses) { DefaultTableModel table = (DefaultTableModel) JTCellNumbers.getModel(); table.addRow(new Object[]{address.getCarrierName().equalsIgnoreCase("[Other]") ? 
address.getCompleteAddress() : address.getAddressBeginning(), address.getCarrierName()}); } } savedEmailAddresses = getCurrentEmails(); savedIsGmail = JRBGmail.isSelected(); this.addWindowListener(new WindowAdapter() { @Override public void windowClosing(WindowEvent e) { int result = JOptionPane.showConfirmDialog(null, "Would you like to save your changes?\r\nYes: Save Changes\r\nNo: Disable Email\r\nCancel: Discard changes\r\n[X] Button: Keep window open", "Save Changes", JOptionPane.YES_NO_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE); if (result == JOptionPane.YES_OPTION) { saveChanges(); } else if (result == JOptionPane.NO_OPTION) { disableEmail(); } else if (result == JOptionPane.CANCEL_OPTION) { cancelChanges(); } } }); } /** * Gets the currently configured EmailAccount. This includes the email addresses currently * configured to be sent to. The EmailAccount must be successfully authenticated, otherwise null * will be returned. * * @return The EmailAccount configured, or null if not set up */ public EmailAccount getEmailAccount() { if (disableEmail) { return null; } EmailAccount account; if (JRBGmail.isSelected() && authGmail.isAuthenticated()) { account = authGmail.getEmailAccount(); } else if (JRBSMTP.isSelected() && authSmtp.isAuthenticated()) { account = authSmtp.getEmailAccount(); } else { return null; } // === Add emails to account === account.clearAllSendAddresses(); // TODO Check to make sure this is the right thing to do, or if there's a better way DefaultTableModel tableModel = (DefaultTableModel) JTCellNumbers.getModel(); if (tableModel.getRowCount() == 0) { return null; } else { for (int i = 0; i < tableModel.getRowCount(); i++) { EmailAddress toAdd; String emailBeginning = (String) tableModel.getValueAt(i, 0); String emailCarrier = (String) tableModel.getValueAt(i, 1); if (emailCarrier.equalsIgnoreCase("[Other]")) { toAdd = new EmailAddress(emailBeginning); } else { toAdd = new EmailAddress(emailBeginning + EmailAddress.getCarrierExtension(emailCarrier)); } account.addBccEmailAddress(toAdd); } } finalizedEmailAccount = account; return finalizedEmailAccount; } public String getEmailType() { if (JRBGmail.isSelected() && authGmail.isAuthenticated()) { return "Gmail API"; } else if (JRBSMTP.isSelected() && authSmtp.isAuthenticated()) { return "SMTP"; } else { return "Disabled"; } } /** * Gets the semicolon-delimited String representing all of the email addresses configured. 
* * @return All the configured email addresses */ public String getEmailAddressesString() { List<EmailAddress> addresses = getCurrentEmails(); StringBuilder allAddresses = new StringBuilder(); for (int i = 0; i < addresses.size(); i++) { if (i > 0) { allAddresses.append(";"); } allAddresses.append(addresses.get(i).getCompleteAddress()); } return allAddresses.toString(); } private List<EmailAddress> getCurrentEmails() { List<EmailAddress> ret = new ArrayList<>(); DefaultTableModel tableModel = (DefaultTableModel) JTCellNumbers.getModel(); for (int i = 0; i < tableModel.getRowCount(); i++) { try { EmailAddress toAdd; String emailBeginning = (String) tableModel.getValueAt(i, 0); String emailCarrier = (String) tableModel.getValueAt(i, 1); if (emailCarrier.equalsIgnoreCase("[Other]")) { toAdd = new EmailAddress(emailBeginning); } else { toAdd = new EmailAddress(emailBeginning + EmailAddress.getCarrierExtension(emailCarrier)); } ret.add(toAdd); } catch (IllegalArgumentException iae) { System.out.println("Invalid email address: " + tableModel.getValueAt(i, 0) + tableModel.getValueAt(i, 1)); } } return ret; } private void addEmail() { String cellNumber = JTFCellNumber.getText(); String carrier = JCBCarrier.getSelectedItem().toString(); if (!cellNumber.isEmpty()) { if (cellNumber.contains("@")) { // Full email configured carrier = "[Other]"; } ((DefaultTableModel) JTCellNumbers.getModel()).addRow(new Object[]{cellNumber, carrier}); JTFCellNumber.setText(null); JCBCarrier.setSelectedIndex(0); JTFCellNumber.requestFocus(); } } private void resetChanges() { DefaultTableModel model = (DefaultTableModel) JTCellNumbers.getModel(); for (int i = model.getRowCount() - 1; i >= 0; i--) { model.removeRow(i); } if (savedEmailAddresses != null) { for (EmailAddress address : savedEmailAddresses) { model.addRow(new Object[]{address.getAddressBeginning(), EmailAddress.getProvider(address.getAddressEnding())}); } } if (savedIsGmail) { JRBGmail.setSelected(true); setAuthPanel(authGmail); } else { JRBSMTP.setSelected(true); setAuthPanel(authSmtp); } } private void setAuthPanel(JPanel toUse) { JPAuthInfo.removeAll(); JPAuthInfo.add(toUse); JPAuthInfo.revalidate(); JPAuthInfo.repaint(); pack(); } private void resetUserInputFields() { JTPComponents.setSelectedIndex(0); JTFCellNumber.setText(null); JCBCarrier.setSelectedIndex(0); } private void updatePreferences() { EmailAccount toSave = getEmailAccount(); if (!disableEmail && toSave != null) { prefs.getPreferenceObject("EMAIL").setValue(toSave.getEmailAddress()); prefs.getPreferenceObject("CELLNUM").setValue(getEmailAddressesString()); prefs.getPreferenceObject("EMAILTYPE").setValue(getEmailType()); prefs.getPreferenceObject("EMAILENABLED").setValue(true); } else { prefs.getPreferenceObject("EMAIL").setValue(null); prefs.getPreferenceObject("CELLNUM").setValue(null); prefs.getPreferenceObject("EMAILTYPE").setValue(null); prefs.getPreferenceObject("EMAILENABLED").setValue(false); } } private void saveChanges() { if (getCurrentEmails().isEmpty()) { int result = JOptionPane.showConfirmDialog(null, "You have no Send To emails configured. 
This means emails will still be disabled.\r\nAre you sure you want to save your changes?\r\nPress Yes to save your changes, or No to add email addresses.", "No Emails Input", JOptionPane.YES_NO_OPTION, JOptionPane.QUESTION_MESSAGE); if (result == JOptionPane.NO_OPTION) { JTPComponents.setSelectedComponent(JPSendTo); return; } } authGmail.recordCurrentFields(); authSmtp.recordCurrentFields(); savedEmailAddresses = getCurrentEmails(); savedIsGmail = JRBGmail.isSelected(); disableEmail = false; setVisible(false); resetUserInputFields(); updatePreferences(); } private void cancelChanges() { authGmail.resetChanges(); authSmtp.resetChanges(); resetChanges(); setVisible(false); resetUserInputFields(); updatePreferences(); } private void disableEmail() { disableEmail = true; authGmail.resetChanges(); authSmtp.resetChanges(); resetChanges(); setVisible(false); resetUserInputFields(); updatePreferences(); } /** * This method is called from within the constructor to initialize the form. WARNING: Do NOT * modify this code. The content of this method is always regenerated by the Form Editor. */ @SuppressWarnings("unchecked") // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents private void initComponents() { BGAuthType = new ButtonGroup(); JTPComponents = new JTabbedPane(); JPAuthentication = new JPanel(); JRBGmail = new JRadioButton(); JRBSMTP = new JRadioButton(); JPAuthInfo = new JPanel(); JPSendTo = new JPanel(); JTFCellNumber = new JTextField(); JBAddNumber = new JButton(); JCBCarrier = new JComboBox<>(); jLabel1 = new JLabel(); jScrollPane1 = new JScrollPane(); JTCellNumbers = new JTable(); JPFinish = new JPanel(); JBSaveChanges = new JButton(); JBCancelChanges = new JButton(); JBDisableEmail = new JButton(); setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE); setTitle("Email Setup"); setResizable(false); BGAuthType.add(JRBGmail); JRBGmail.setText("Gmail API"); JRBGmail.setToolTipText("<html>\n<i>English</i>\n<p width=\"500\">Authenticates with Google through your browser. Recommended.</p>\n<i>Tech</i>\n<p width=\"500\">Used for authenticating with Google via OAuth2.<br></p>\n</html>"); JRBGmail.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JRBGmailActionPerformed(evt); } }); BGAuthType.add(JRBSMTP); JRBSMTP.setText("SMTP"); JRBSMTP.setToolTipText("<html>\n<i>English</i>\n<p width=\"500\">Authenticates with any email service. Not recommended.</p>\n<i>Tech</i>\n<p width=\"500\">Authenticates with any mailserver using SMTP. Issues with this have cropped up in the past, and it's hard to detect where the problem lies. 
My guess is ISPs or routers blocking SMTP traffic (insane), but I don't know for sure.</p>\n</html>"); JRBSMTP.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JRBSMTPActionPerformed(evt); } }); JPAuthInfo.setLayout(new BoxLayout(JPAuthInfo, BoxLayout.LINE_AXIS)); GroupLayout JPAuthenticationLayout = new GroupLayout(JPAuthentication); JPAuthentication.setLayout(JPAuthenticationLayout); JPAuthenticationLayout.setHorizontalGroup(JPAuthenticationLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(JPAuthenticationLayout.createSequentialGroup() .addContainerGap() .addComponent(JRBGmail) .addGap(18, 18, 18) .addComponent(JRBSMTP) .addContainerGap(249, Short.MAX_VALUE)) .addComponent(JPAuthInfo, GroupLayout.DEFAULT_SIZE, GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) ); JPAuthenticationLayout.setVerticalGroup(JPAuthenticationLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(JPAuthenticationLayout.createSequentialGroup() .addContainerGap() .addGroup(JPAuthenticationLayout.createParallelGroup(GroupLayout.Alignment.BASELINE) .addComponent(JRBGmail) .addComponent(JRBSMTP)) .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED) .addComponent(JPAuthInfo, GroupLayout.DEFAULT_SIZE, GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) ); JTPComponents.addTab("Authentication", JPAuthentication); JTFCellNumber.addKeyListener(new KeyAdapter() { public void keyPressed(KeyEvent evt) { JTFCellNumberKeyPressed(evt); } }); JBAddNumber.setText("Add Number"); JBAddNumber.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JBAddNumberActionPerformed(evt); } }); JBAddNumber.addKeyListener(new KeyAdapter() { public void keyPressed(KeyEvent evt) { JBAddNumberKeyPressed(evt); } }); JCBCarrier.setModel(new DefaultComboBoxModel<>(new String[] { "AT&T (MMS)", "AT&T (SMS)", "Verizon", "Sprint", "T-Mobile", "U.S. 
Cellular", "Bell", "Rogers", "Fido", "Koodo", "Telus", "Virgin (CAN)", "Wind", "Sasktel", "[Other]" })); JCBCarrier.addKeyListener(new KeyAdapter() { public void keyPressed(KeyEvent evt) { JCBCarrierKeyPressed(evt); } }); jLabel1.setText("Cell Number"); JTCellNumbers.setModel(new DefaultTableModel( new Object [][] { }, new String [] { "Cell Number", "Carrier" } ) { Class[] types = new Class [] { String.class, String.class }; boolean[] canEdit = new boolean [] { false, false }; public Class getColumnClass(int columnIndex) { return types [columnIndex]; } public boolean isCellEditable(int rowIndex, int columnIndex) { return canEdit [columnIndex]; } }); JTCellNumbers.setToolTipText("Delete emails by selecting them and pressing the DEL key"); JTCellNumbers.setColumnSelectionAllowed(true); JTCellNumbers.addKeyListener(new KeyAdapter() { public void keyPressed(KeyEvent evt) { JTCellNumbersKeyPressed(evt); } }); jScrollPane1.setViewportView(JTCellNumbers); GroupLayout JPSendToLayout = new GroupLayout(JPSendTo); JPSendTo.setLayout(JPSendToLayout); JPSendToLayout.setHorizontalGroup(JPSendToLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(JPSendToLayout.createSequentialGroup() .addComponent(jLabel1) .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED) .addComponent(JTFCellNumber, GroupLayout.DEFAULT_SIZE, 147, Short.MAX_VALUE) .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED) .addComponent(JCBCarrier, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE) .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED) .addComponent(JBAddNumber)) .addComponent(jScrollPane1, GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE) ); JPSendToLayout.setVerticalGroup(JPSendToLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(GroupLayout.Alignment.TRAILING, JPSendToLayout.createSequentialGroup() .addContainerGap() .addGroup(JPSendToLayout.createParallelGroup(GroupLayout.Alignment.BASELINE) .addComponent(JTFCellNumber, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE) .addComponent(JBAddNumber) .addComponent(JCBCarrier, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE) .addComponent(jLabel1)) .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED) .addComponent(jScrollPane1, GroupLayout.DEFAULT_SIZE, 132, Short.MAX_VALUE)) ); JTPComponents.addTab("Send To", JPSendTo); JBSaveChanges.setText("Save Changes"); JBSaveChanges.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JBSaveChangesActionPerformed(evt); } }); JBCancelChanges.setText("Cancel Changes"); JBCancelChanges.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JBCancelChangesActionPerformed(evt); } }); JBDisableEmail.setText("Disable Email"); JBDisableEmail.addActionListener(new ActionListener() { public void actionPerformed(ActionEvent evt) { JBDisableEmailActionPerformed(evt); } }); GroupLayout JPFinishLayout = new GroupLayout(JPFinish); JPFinish.setLayout(JPFinishLayout); JPFinishLayout.setHorizontalGroup(JPFinishLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(JPFinishLayout.createSequentialGroup() .addContainerGap() .addGroup(JPFinishLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addComponent(JBSaveChanges, GroupLayout.DEFAULT_SIZE, GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE) .addComponent(JBCancelChanges, GroupLayout.DEFAULT_SIZE, 375, Short.MAX_VALUE) .addComponent(JBDisableEmail, GroupLayout.DEFAULT_SIZE, 
GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)) .addContainerGap()) ); JPFinishLayout.setVerticalGroup(JPFinishLayout.createParallelGroup(GroupLayout.Alignment.LEADING) .addGroup(GroupLayout.Alignment.TRAILING, JPFinishLayout.createSequentialGroup() .addContainerGap() .addComponent(JBSaveChanges) .addGap(18, 18, 18) .addComponent(JBCancelChanges) .addGap(18, 18, 18) .addComponent(JBDisableEmail) .addContainerGap(56, Short.MAX_VALUE)) ); JTPComponents.addTab("Finish", JPFinish); GroupLayout layout = new GroupLayout(getContentPane()); getContentPane().setLayout(layout); layout.setHorizontalGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING) .addComponent(JTPComponents) ); layout.setVerticalGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING) .addComponent(JTPComponents) ); pack(); }// </editor-fold>//GEN-END:initComponents private void JRBGmailActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JRBGmailActionPerformed setAuthPanel(authGmail); }//GEN-LAST:event_JRBGmailActionPerformed private void JRBSMTPActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JRBSMTPActionPerformed setAuthPanel(authSmtp); }//GEN-LAST:event_JRBSMTPActionPerformed private void JBAddNumberActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JBAddNumberActionPerformed addEmail(); }//GEN-LAST:event_JBAddNumberActionPerformed private void JTCellNumbersKeyPressed(KeyEvent evt) {//GEN-FIRST:event_JTCellNumbersKeyPressed if (evt.getKeyCode() == KeyEvent.VK_DELETE) { int[] selectedIndeces = JTCellNumbers.getSelectedRows(); for (int i = selectedIndeces.length - 1; i >= 0; i--) { // Iterate from the bottom up ((DefaultTableModel) JTCellNumbers.getModel()).removeRow(selectedIndeces[i]); } } else if (evt.getKeyCode() == KeyEvent.VK_TAB) { this.transferFocus(); evt.consume(); } }//GEN-LAST:event_JTCellNumbersKeyPressed private void JTFCellNumberKeyPressed(KeyEvent evt) {//GEN-FIRST:event_JTFCellNumberKeyPressed if (evt.getKeyCode() == KeyEvent.VK_ENTER) { addEmail(); } }//GEN-LAST:event_JTFCellNumberKeyPressed private void JCBCarrierKeyPressed(KeyEvent evt) {//GEN-FIRST:event_JCBCarrierKeyPressed if (evt.getKeyCode() == KeyEvent.VK_ENTER) { addEmail(); } }//GEN-LAST:event_JCBCarrierKeyPressed private void JBAddNumberKeyPressed(KeyEvent evt) {//GEN-FIRST:event_JBAddNumberKeyPressed if (evt.getKeyCode() == KeyEvent.VK_ENTER) { addEmail(); } }//GEN-LAST:event_JBAddNumberKeyPressed private void JBSaveChangesActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JBSaveChangesActionPerformed saveChanges(); }//GEN-LAST:event_JBSaveChangesActionPerformed private void JBCancelChangesActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JBCancelChangesActionPerformed cancelChanges(); }//GEN-LAST:event_JBCancelChangesActionPerformed private void JBDisableEmailActionPerformed(ActionEvent evt) {//GEN-FIRST:event_JBDisableEmailActionPerformed disableEmail(); }//GEN-LAST:event_JBDisableEmailActionPerformed // Variables declaration - do not modify//GEN-BEGIN:variables private ButtonGroup BGAuthType; private JButton JBAddNumber; private JButton JBCancelChanges; private JButton JBDisableEmail; private JButton JBSaveChanges; private JComboBox<String> JCBCarrier; private JPanel JPAuthInfo; private JPanel JPAuthentication; private JPanel JPFinish; private JPanel JPSendTo; private JRadioButton JRBGmail; private JRadioButton JRBSMTP; private JTable JTCellNumbers; private JTextField JTFCellNumber; private JTabbedPane JTPComponents; private JLabel jLabel1; private JScrollPane jScrollPane1; // End of variables declaration//GEN-END:variables 
private class AuthenticationCallback implements Runnable { private boolean nextEnabledState = false; public void run() { JBSaveChanges.setEnabled(nextEnabledState); JBCancelChanges.setEnabled(nextEnabledState); JBDisableEmail.setEnabled(nextEnabledState); JRBGmail.setEnabled(nextEnabledState); JRBSMTP.setEnabled(nextEnabledState); nextEnabledState = !nextEnabledState; // Invert } } }<|fim▁end|>
if (smtpAddress != null) { authSmtp.setEmailAddress(smtpAddress);
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|> def get_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3:] == ('alpha', 0): version = '%s pre-alpha' % version else: if VERSION[3] != 'final': version = '%s %s %s' % (version, VERSION[3], VERSION[4]) from django.utils.version import get_svn_revision svn_rev = get_svn_revision() if svn_rev != u'SVN-unknown': version = "%s %s" % (version, svn_rev) return version<|fim▁end|>
VERSION = (1, 3, 0, 'alpha', 1)
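
With the completed tuple, get_version() can be traced by hand (ignoring the SVN revision lookup at the end):

VERSION = (1, 3, 0, 'alpha', 1)
version = '%s.%s' % (VERSION[0], VERSION[1])              # '1.3'
# VERSION[2] is 0 (falsy), so no third component is appended.
# VERSION[3:] is ('alpha', 1), not ('alpha', 0), so this is not "pre-alpha";
# VERSION[3] != 'final', so the status and serial are appended:
version = '%s %s %s' % (version, VERSION[3], VERSION[4])  # '1.3 alpha 1'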
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models from django.contrib.auth.models import User from albaproject.settings import MEDIA_ROOT import pdb def _upload_to_generic(prefix_path=None, instance=None, field=None, filename=None): #pdb.set_trace() if not instance.pk: # generate DB PK if not present instance.save() if not prefix_path: if not filename: return '{0}/job_{1}/{2}'.format(instance.user.username, instance.pk, field) return '{0}/job_{1}/{2}/{3}'.format(instance.user.username, instance.pk, field, filename) return '{0}/{1}/job_{2}/{3}'.format(prefix_path, instance.user.username, instance.pk, field) class Job(models.Model): def __unicode__(self): return str(self.id) def save(self, *args, **kwargs): #pdb.set_trace() _input = self.file_input _job = self.mapred_job _output = self.file_output self.file_input = None self.mapred_job = None self.file_output = None super(Job, self).save(*args,**kwargs) self.save = super(Job, self).save self.file_input = _input self.mapred_job = _job self.file_output = _output self.save() #super.save def input_dest(self, filename): return _upload_to_generic(None, self, 'input', filename) def mapred_dest(self, filename): return _upload_to_generic(None, self, 'mapred', filename) <|fim▁hole|> return _upload_to_generic(MEDIA_ROOT, self, 'output', None) user = models.ForeignKey(User) file_input = models.FileField(upload_to=input_dest, null=True) mapred_job = models.FileField(upload_to=mapred_dest, null=True) fully_qualified_job_impl_class = models.CharField(max_length=200, null=True) file_output = models.FileField(upload_to=output_dest, null=True) submission_date = models.DateTimeField(auto_now_add=True) class Server(models.Model): job = models.ForeignKey(Job) openstack_id = models.CharField(max_length=200) server_name = models.CharField(max_length=200) vcpus = models.PositiveSmallIntegerField() ram = models.PositiveIntegerField() disk = models.PositiveIntegerField()<|fim▁end|>
def output_dest(self, filename): return _upload_to_generic(None, self, 'output', filename) def output_path(self):
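
The completion supplies the output callbacks, so all three upload destinations route through _upload_to_generic. A standalone re-trace of its path logic (the username and primary key are made-up values):

def upload_to(prefix, username, pk, field, filename=None):
    # Same branching as _upload_to_generic in the model above.
    if not prefix:
        if not filename:
            return '{0}/job_{1}/{2}'.format(username, pk, field)
        return '{0}/job_{1}/{2}/{3}'.format(username, pk, field, filename)
    return '{0}/{1}/job_{2}/{3}'.format(prefix, username, pk, field)

assert upload_to(None, 'alice', 7, 'input', 'data.csv') == 'alice/job_7/input/data.csv'
assert upload_to('/media', 'alice', 7, 'output') == '/media/alice/job_7/output'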
<|file_name|>server.go<|end_file_name|><|fim▁begin|>package grpc import ( "context" pb "go-common/app/service/main/account/api" "go-common/app/service/main/account/conf" "go-common/app/service/main/account/service" "go-common/library/net/rpc/warden" ) // New warden rpc server func New(c *conf.Config, s *service.Service) (svr *warden.Server) { svr = warden.NewServer(c.WardenServer) pb.RegisterAccountServer(svr.Server(), &server{as: s}) return svr } // Start create and start warden rpc server func Start(c *conf.Config, s *service.Service) (svr *warden.Server, err error) { svr = warden.NewServer(c.WardenServer) pb.RegisterAccountServer(svr.Server(), &server{as: s}) if svr, err = svr.Start(); err != nil { return } return } type server struct { as *service.Service } var _ pb.AccountServer = &server{} func (s *server) Info3(ctx context.Context, req *pb.MidReq) (*pb.InfoReply, error) { info, err := s.as.Info(ctx, req.Mid) if err != nil { return nil, err } return &pb.InfoReply{Info: info}, nil } func (s *server) Infos3(ctx context.Context, req *pb.MidsReq) (*pb.InfosReply, error) { infos, err := s.as.Infos(ctx, req.Mids) if err != nil { return nil, err } return &pb.InfosReply{Infos: infos}, nil } func (s *server) InfosByName3(ctx context.Context, req *pb.NamesReq) (*pb.InfosReply, error) { infos, err := s.as.InfosByName(ctx, req.Names) if err != nil { return nil, err } return &pb.InfosReply{Infos: infos}, nil } func (s *server) Card3(ctx context.Context, req *pb.MidReq) (*pb.CardReply, error) { card, err := s.as.Card(ctx, req.Mid) if err != nil { return nil, err } return &pb.CardReply{Card: card}, nil } func (s *server) Cards3(ctx context.Context, req *pb.MidsReq) (*pb.CardsReply, error) { cards, err := s.as.Cards(ctx, req.Mids) if err != nil { return nil, err } return &pb.CardsReply{Cards: cards}, nil } func (s *server) Profile3(ctx context.Context, req *pb.MidReq) (*pb.ProfileReply, error) { profile, err := s.as.Profile(ctx, req.Mid) if err != nil { return nil, err } return &pb.ProfileReply{Profile: profile}, nil } func (s *server) ProfileWithStat3(ctx context.Context, req *pb.MidReq) (*pb.ProfileStatReply, error) { profileStat, err := s.as.ProfileWithStat(ctx, req.Mid) if err != nil { return nil, err } level := pb.LevelInfo{} level.DeepCopyFromLevelInfo(&profileStat.LevelExp) return &pb.ProfileStatReply{ Profile: profileStat.Profile,<|fim▁hole|> Coins: profileStat.Coins, Follower: profileStat.Follower, Following: profileStat.Following, }, nil } func (s *server) AddExp3(ctx context.Context, req *pb.ExpReq) (*pb.ExpReply, error) { return &pb.ExpReply{}, s.as.AddExp(ctx, req.Mid, req.Exp, req.Operater, req.Operate, req.Reason) } func (s *server) AddMoral3(ctx context.Context, req *pb.MoralReq) (*pb.MoralReply, error) { return &pb.MoralReply{}, s.as.AddMoral(ctx, req.Mid, req.Moral, req.Oper, req.Reason, req.Remark) } func (s *server) Relation3(ctx context.Context, req *pb.RelationReq) (*pb.RelationReply, error) { relation, err := s.as.Relation(ctx, req.Mid, req.Owner) if err != nil { return nil, err } return &pb.RelationReply{Following: relation.Following}, nil } func (s *server) Attentions3(ctx context.Context, req *pb.MidReq) (*pb.AttentionsReply, error) { attentions, err := s.as.Attentions(ctx, req.Mid) if err != nil { return nil, err } return &pb.AttentionsReply{Attentions: attentions}, nil } func (s *server) Blacks3(ctx context.Context, req *pb.MidReq) (*pb.BlacksReply, error) { blackList, err := s.as.Blacks(ctx, req.Mid) if err != nil { return nil, err } blackListBool := make(map[int64]bool, 
len(blackList)) for k := range blackList { blackListBool[k] = true } return &pb.BlacksReply{BlackList: blackListBool}, nil } func (s *server) Relations3(ctx context.Context, req *pb.RelationsReq) (*pb.RelationsReply, error) { relations, err := s.as.Relations(ctx, req.Mid, req.Owners) if err != nil { return nil, err } newRelations := make(map[int64]*pb.RelationReply, len(relations)) for k, v := range relations { newRelations[k] = &pb.RelationReply{Following: v.Following} } return &pb.RelationsReply{Relations: newRelations}, nil } func (s *server) RichRelations3(ctx context.Context, req *pb.RichRelationReq) (*pb.RichRelationsReply, error) { richRelations, err := s.as.RichRelations2(ctx, req.Owner, req.Mids) if err != nil { return nil, err } newRichRelations := make(map[int64]int32, len(richRelations)) for k, v := range richRelations { newRichRelations[k] = int32(v) } return &pb.RichRelationsReply{RichRelations: newRichRelations}, nil } func (s *server) Vip3(ctx context.Context, req *pb.MidReq) (*pb.VipReply, error) { vip, err := s.as.Vip(ctx, req.Mid) if err != nil { return nil, err } reply := new(pb.VipReply) reply.DeepCopyFromVipInfo(vip) return reply, nil } func (s *server) Vips3(ctx context.Context, req *pb.MidsReq) (*pb.VipsReply, error) { vips, err := s.as.Vips(ctx, req.Mids) if err != nil { return nil, err } pvips := make(map[int64]*pb.VipReply, len(vips)) for mid, vip := range vips { pvip := new(pb.VipReply) pvip.DeepCopyFromVipInfo(vip) pvips[mid] = pvip } return &pb.VipsReply{Vips: pvips}, nil }<|fim▁end|>
LevelInfo: level,