Dataset columns, with dtype and the observed minimum/maximum per column (for stringlengths columns the min/max are string lengths in characters; stringclasses lists the number of distinct values):

Column           Type            Min      Max
id               int64           20       338k
vocab_size       int64           2        671
ast_levels       int64           4        32
nloc             int64           1        451
n_ast_nodes      int64           12       5.6k
n_identifiers    int64           1        186
n_ast_errors     int64           0        10
n_words          int64           2        2.17k
n_whitespaces    int64           2        13.8k
fun_name         stringlengths   2        73
commit_message   stringlengths   51       15.3k
url              stringlengths   31       59
code             stringlengths   51       31k
ast_errors       stringlengths   0        1.46k
token_counts     int64           6        3.32k
file_name        stringlengths   5        56
language         stringclasses   1 value (Python)
path             stringlengths   7        134
commit_id        stringlengths   40       40
repo             stringlengths   3        28
complexity       int64           1        153
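The example rows below follow this column order, one function-level record per example. As a rough sketch only: if the dataset is hosted on the Hugging Face Hub, records with these columns could be loaded and sliced with the datasets library. The repository id used here is a placeholder, since the actual identifier is not given on this page.

from datasets import load_dataset

# Placeholder repository id -- the real dataset identifier is not stated here.
ds = load_dataset("org/python-functions-dataset", split="train")

# Each record exposes the columns described above.
example = ds[0]
print(example["fun_name"], example["repo"], example["path"], example["complexity"])

# Numeric columns such as complexity and nloc can be used to slice the data,
# e.g. keeping only short, low-complexity functions.
small = ds.filter(lambda ex: ex["complexity"] <= 5 and ex["nloc"] <= 20)
print(len(small), "records after filtering")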

id: 223,960 | vocab_size: 50 | ast_levels: 13 | nloc: 16 | n_ast_nodes: 249 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 70 | n_whitespaces: 141 | fun_name: load_config
Fix imports to get tests passing (#2751) * Fix broken localization import in theme_tests.py * Fix tests by importing directly from config submodule * Fix test runner top level directory
https://github.com/mkdocs/mkdocs.git
def load_config(**cfg): path_base = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'integration', 'minimal' ) cfg = cfg or {} if 'site_name' not in cfg: cfg['site_name'] = 'Example' if 'config_file_path' not in cfg: cfg['config_file_path'] = os.path.join(path_base, 'mkdocs.yml') if 'docs_dir' not in cfg: # Point to an actual dir to avoid a 'does not exist' error on validation. cfg['docs_dir'] = os.path.join(path_base, 'docs') conf = config.Config(schema=config_defaults.get_schema(), config_file_path=cfg['config_file_path']) conf.load_dict(cfg) errors_warnings = conf.validate() assert(errors_warnings == ([], [])), errors_warnings return conf
145
base.py
Python
mkdocs/tests/base.py
c93fc91e4dc0ef33e2ea418aaa32b0584a8d354a
mkdocs
5

id: 125,174 | vocab_size: 17 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 61 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 26 | fun_name: _column_type
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
https://github.com/ray-project/ray.git
def _column_type(strings, has_invisible=True, numparse=True): types = [_type(s, has_invisible, numparse) for s in strings] return reduce(_more_generic, types, _bool_type)
39
tabulate.py
Python
python/ray/_private/thirdparty/tabulate/tabulate.py
adf24bfa9723b0621183bb27f0c889b813c06e8a
ray
2

id: 267,750 | vocab_size: 41 | ast_levels: 16 | nloc: 12 | n_ast_nodes: 136 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 57 | n_whitespaces: 114 | fun_name: parse_python_requires
ansible-test - Parse content config only once. (#78418)
https://github.com/ansible/ansible.git
def parse_python_requires(value): # type: (t.Any) -> tuple[str, ...] if not isinstance(value, str): raise ValueError('python_requires must must be of type `str` not type `%s`' % type(value)) versions: tuple[str, ...] if value == 'default': versions = SUPPORTED_PYTHON_VERSIONS elif value == 'controller': versions = CONTROLLER_PYTHON_VERSIONS else: specifier_set = SpecifierSet(value) versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version))) return versions
79
content_config.py
Python
test/lib/ansible_test/_internal/content_config.py
f2abfc4b3d03a2baa078477d0ad2241263a00668
ansible
6

id: 249,554 | vocab_size: 78 | ast_levels: 14 | nloc: 29 | n_ast_nodes: 313 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 136 | n_whitespaces: 539 | fun_name: test_persisting_event_invalidates_cache
Fix `have_seen_event` cache not being invalidated (#13863) Fix https://github.com/matrix-org/synapse/issues/13856 Fix https://github.com/matrix-org/synapse/issues/13865 > Discovered while trying to make Synapse fast enough for [this MSC2716 test for importing many batches](https://github.com/matrix-org/complement/pull/214#discussion_r741678240). As an example, disabling the `have_seen_event` cache saves 10 seconds for each `/messages` request in that MSC2716 Complement test because we're not making as many federation requests for `/state` (speeding up `have_seen_event` itself is related to https://github.com/matrix-org/synapse/issues/13625) > > But this will also make `/messages` faster in general so we can include it in the [faster `/messages` milestone](https://github.com/matrix-org/synapse/milestone/11). > > *-- https://github.com/matrix-org/synapse/issues/13856* ### The problem `_invalidate_caches_for_event` doesn't run in monolith mode which means we never even tried to clear the `have_seen_event` and other caches. And even in worker mode, it only runs on the workers, not the master (AFAICT). Additionally there was bug with the key being wrong so `_invalidate_caches_for_event` never invalidates the `have_seen_event` cache even when it does run. Because we were using the `@cachedList` wrong, it was putting items in the cache under keys like `((room_id, event_id),)` with a `set` in a `set` (ex. `(('!TnCIJPKzdQdUlIyXdQ:test', '$Iu0eqEBN7qcyF1S9B3oNB3I91v2o5YOgRNPwi_78s-k'),)`) and we we're trying to invalidate with just `(room_id, event_id)` which did nothing.
https://github.com/matrix-org/synapse.git
def test_persisting_event_invalidates_cache(self): event, event_context = self.get_success( create_event( self.hs, room_id=self.room_id, sender=self.user, type="test_event_type", content={"body": "garply"}, ) ) with LoggingContext(name="test") as ctx: # First, check `have_seen_event` for an event we have not seen yet # to prime the cache with a `false` value. res = self.get_success( self.store.have_seen_events(event.room_id, [event.event_id]) ) self.assertEqual(res, set()) # That should result in a single db query to lookup self.assertEqual(ctx.get_resource_usage().db_txn_count, 1) # Persist the event which should invalidate or prefill the # `have_seen_event` cache so we don't return stale values. persistence = self.hs.get_storage_controllers().persistence self.get_success( persistence.persist_event( event, event_context, ) ) with LoggingContext(name="test") as ctx: # Check `have_seen_event` again and we should see the updated fact # that we have now seen the event after persisting it. res = self.get_success( self.store.have_seen_events(event.room_id, [event.event_id]) ) self.assertEqual(res, {event.event_id}) # That should result in a single db query to lookup self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
187
test_events_worker.py
Python
tests/storage/databases/main/test_events_worker.py
29269d9d3f3419a3d92cdd80dae4a37e2d99a395
synapse
1

id: 288,114 | vocab_size: 7 | ast_levels: 8 | nloc: 3 | n_ast_nodes: 40 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21 | fun_name: iot_standards
Add support for integrations v2 (#78801) Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
def iot_standards(self) -> list[str]: return self.brand.get("iot_standards", [])
23
model.py
Python
script/hassfest/model.py
b173ae7f444a330f92c25dfb5e3d581616a768cd
core
1

id: 212,530 | vocab_size: 22 | ast_levels: 15 | nloc: 10 | n_ast_nodes: 103 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 97 | fun_name: js_files
Normalize built-in types and remove `Unknown` (#12252) * Use lower case names for built-in types Also incidentally apply TypeAlias marker. * Drop `Unknown` in favour of consistent usage of `Any` * Enable lazy annotations in conftest.py
https://github.com/bokeh/bokeh.git
def js_files(self) -> list[str]: js_files: list[str] = [] for root, _, files in os.walk(self.bokehjsdir()): for fname in files: if fname.endswith(".js"): js_files.append(join(root, fname)) return js_files
64
settings.py
Python
bokeh/settings.py
528d85e642340ef30ec91f30b65c7c43370f648d
bokeh
4

id: 304,814 | vocab_size: 41 | ast_levels: 10 | nloc: 15 | n_ast_nodes: 115 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 53 | n_whitespaces: 224 | fun_name: _on_click
Improve type hint in flic binary sensor entity (#77161)
https://github.com/home-assistant/core.git
def _on_click(self, channel, click_type, was_queued, time_diff): # Return if click event was queued beyond allowed timeout if was_queued and self._queued_event_check(click_type, time_diff): return # Return if click event is in ignored click types hass_click_type = self._hass_click_types[click_type] if hass_click_type in self._ignored_click_types: return self._hass.bus.fire( EVENT_NAME, { EVENT_DATA_NAME: self.name, EVENT_DATA_ADDRESS: self._address, EVENT_DATA_QUEUED_TIME: time_diff, EVENT_DATA_TYPE: hass_click_type, }, )
77
binary_sensor.py
Python
homeassistant/components/flic/binary_sensor.py
3031caafed9811e0b3da146c2ee5a8a7f0080b5e
core
4

id: 9,834 | vocab_size: 39 | ast_levels: 10 | nloc: 22 | n_ast_nodes: 145 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 162 | fun_name: mixin_gateway_parser
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def mixin_gateway_parser(parser): gp = add_arg_group(parser, title='Gateway') _add_host(gp) _add_proxy(gp) gp.add_argument( '--port-expose', type=int, default=helper.random_port(), help='The port that the gateway exposes for clients for GRPC connections.', ) parser.add_argument( '--graph-description', type=str, help='Routing graph for the gateway', default='{}', ) parser.add_argument( '--pods-addresses', type=str, help='dictionary JSON with the input addresses of each Pod', default='{}', )
85
remote.py
Python
jina/parsers/peapods/runtimes/remote.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1

id: 64,349 | vocab_size: 79 | ast_levels: 14 | nloc: 38 | n_ast_nodes: 462 | n_identifiers: 34 | n_ast_errors: 0 | n_words: 107 | n_whitespaces: 69 | fun_name: create_production_plan
test: Production Plan Pending Qty impact tests - Two tests to check impact on pending qty: From SO and independent Prod Plan - Added docstring to each test case for brief summary - Changed helper function args to fallback to 0 instead of 1 if no arg is passed - Removed unnecessary `get_doc()` - Made helper function actions optional depending on args passed
https://github.com/frappe/erpnext.git
def create_production_plan(**args): args = frappe._dict(args) pln = frappe.get_doc({ 'doctype': 'Production Plan', 'company': args.company or '_Test Company', 'customer': args.customer or '_Test Customer', 'posting_date': nowdate(), 'include_non_stock_items': args.include_non_stock_items or 0, 'include_subcontracted_items': args.include_subcontracted_items or 0, 'ignore_existing_ordered_qty': args.ignore_existing_ordered_qty or 0, 'get_items_from': 'Sales Order' }) if not args.get("sales_order"): pln.append('po_items', { 'use_multi_level_bom': args.use_multi_level_bom or 1, 'item_code': args.item_code, 'bom_no': frappe.db.get_value('Item', args.item_code, 'default_bom'), 'planned_qty': args.planned_qty or 1, 'planned_start_date': args.planned_start_date or now_datetime() }) if args.get("get_items_from") == "Sales Order" and args.get("sales_order"): so = args.get("sales_order") pln.append('sales_orders', { 'sales_order': so.name, 'sales_order_date': so.transaction_date, 'customer': so.customer, 'grand_total': so.grand_total }) pln.get_items() if not args.get("skip_getting_mr_items"): mr_items = get_items_for_material_requests(pln.as_dict()) for d in mr_items: pln.append('mr_items', d) if not args.do_not_save: pln.insert() if not args.do_not_submit: pln.submit() return pln
261
test_production_plan.py
Python
erpnext/manufacturing/doctype/production_plan/test_production_plan.py
86ca41b14af45f44ec63a27ed10580b161a33b4c
erpnext
16

id: 12,002 | vocab_size: 20 | ast_levels: 13 | nloc: 8 | n_ast_nodes: 93 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 65 | fun_name: in_docker
feat: remove head for non sharded deployments (#4517) * refactor: remove rolling_update and scale * feat: skip head creation * feat: dont create head in k8s and docker * feat: reduce needs at the gateway * feat: adapt tests * style: fix overload and cli autocomplete * fix: more tests * fix: more tests * fix: more tests * fix: k8s tests * fix: file handler leaking * fix: more tests * fix: k8s tests * fix: k8s tests * refactor: move exception * fix: merge accident * fix: broken jinad test * refactor: update docs * feat: add ports property * style: fix overload and cli autocomplete Co-authored-by: Jina Dev Bot <[email protected]>
https://github.com/jina-ai/jina.git
def in_docker(): path = '/proc/self/cgroup' if os.path.exists('/.dockerenv'): return True if os.path.isfile(path): with open(path) as file: return any('docker' in line for line in file) return False
51
networking.py
Python
jina/serve/networking.py
12163af01009772035b2e87523663beb890a2549
jina
4

id: 20,532 | vocab_size: 34 | ast_levels: 11 | nloc: 8 | n_ast_nodes: 71 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 39 | n_whitespaces: 139 | fun_name: with_attribute
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def with_attribute(*args, **attr_dict): <div> Some text <div type="grid">1 4 0 1 0</div> <div type="graph">1,3 2,3 1,1</div> <div>this has no type</div> </div> if args: attrs = args[:] else: attrs = attr_dict.items() attrs = [(k, v) for k, v in attrs]
47
actions.py
Python
pipenv/patched/notpip/_vendor/pyparsing/actions.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
3

id: 47,940 | vocab_size: 22 | ast_levels: 12 | nloc: 18 | n_ast_nodes: 109 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 157 | fun_name: test_copy_with_target_credential
Update to the released version of DBSQL connector Also added additional parameters for further customization of connection if it's required
https://github.com/apache/airflow.git
def test_copy_with_target_credential(self): expression = "col1, col2" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, storage_credential='abc', credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() )
60
test_databricks_sql.py
Python
tests/providers/databricks/operators/test_databricks_sql.py
6a3d6cc32b4e3922d259c889460fe82e0ebf3663
airflow
1

id: 270,641 | vocab_size: 6 | ast_levels: 7 | nloc: 12 | n_ast_nodes: 20 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 21 | fun_name: configTestMesh
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def configTestMesh(device_type_mesh_map): # pylint: disable=invalid-name reset_context()
75
test_util.py
Python
keras/dtensor/test_util.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2

id: 202,989 | vocab_size: 22 | ast_levels: 13 | nloc: 8 | n_ast_nodes: 126 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 31 | n_whitespaces: 67 | fun_name: get_commands
Refs #32355 -- Removed unnecessary list() calls before reversed() on dictviews. Dict and dictviews are iterable in reversed insertion order using reversed() in Python 3.8+.
https://github.com/django/django.git
def get_commands(): commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, 'management') commands.update({name: app_config.name for name in find_commands(path)}) return commands
77
__init__.py
Python
django/core/management/__init__.py
7346c288e307e1821e3ceb757d686c9bd879389c
django
5

id: 171,245 | vocab_size: 4 | ast_levels: 12 | nloc: 2 | n_ast_nodes: 41 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 18 | fun_name: count
DEPR: Remove df.reduction(level) (#49611) * DEPR: Remove df.reduction(level) * test_*_consistency * Fix asv * Add issue ref
https://github.com/pandas-dev/pandas.git
def count(self): return notna(self._values).sum().astype("int64")
22
series.py
Python
pandas/core/series.py
dbb2adc1f353d9b0835901c274cbe0d2f5a5664f
pandas
1

id: 125,678 | vocab_size: 39 | ast_levels: 12 | nloc: 12 | n_ast_nodes: 124 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 44 | n_whitespaces: 149 | fun_name: get_assigned_resources
[core] runtime context resource ids getter (#26907)
https://github.com/ray-project/ray.git
def get_assigned_resources(self): assert ( self.worker.mode == ray._private.worker.WORKER_MODE ), f"This method is only available when the process is a\ worker. Current mode: {self.worker.mode}" self.worker.check_connected() resource_id_map = self.worker.core_worker.resource_ids() resource_map = { res: sum(amt for _, amt in mapping) for res, mapping in resource_id_map.items() } return resource_map
71
runtime_context.py
Python
python/ray/runtime_context.py
d01a80eb11d32e62e0a20ef8f84852b65be93892
ray
3

id: 314,204 | vocab_size: 15 | ast_levels: 9 | nloc: 10 | n_ast_nodes: 43 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 67 | fun_name: _temperature_unit
Weather unit conversion (#73441) Co-authored-by: Erik <[email protected]>
https://github.com/home-assistant/core.git
def _temperature_unit(self) -> str: if ( weather_option_temperature_unit := self._weather_option_temperature_unit ) is not None: return weather_option_temperature_unit return self._default_temperature_unit
26
__init__.py
Python
homeassistant/components/weather/__init__.py
90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
core
2

id: 299,644 | vocab_size: 20 | ast_levels: 12 | nloc: 11 | n_ast_nodes: 116 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 36 | n_whitespaces: 133 | fun_name: referenced_devices
Fix missing device & entity references in automations (#71103)
https://github.com/home-assistant/core.git
def referenced_devices(self): if self._referenced_devices is not None: return self._referenced_devices referenced = self.action_script.referenced_devices if self._cond_func is not None: for conf in self._cond_func.config: referenced |= condition.async_extract_devices(conf) for conf in self._trigger_config: referenced |= set(_trigger_extract_device(conf)) self._referenced_devices = referenced return referenced
73
__init__.py
Python
homeassistant/components/automation/__init__.py
63679d3d2927f9e6b1029bca994a3fe138480faa
core
5

id: 186,082 | vocab_size: 5 | ast_levels: 6 | nloc: 5 | n_ast_nodes: 16 | n_identifiers: 1 | n_ast_errors: 0 | n_words: 5 | n_whitespaces: 8 | fun_name: test_pressing_alpha_on_app
Add a test for actions being fired from bound keys Do this with a focus on detecting a bound alpha key, and a bound movement key
https://github.com/Textualize/textual.git
async def test_pressing_alpha_on_app() -> None:
40
test_binding_inheritance.py
Python
tests/test_binding_inheritance.py
751042f9d7c3a5ffeb026e025412db511e8a04ed
textual
1

id: 269,622 | vocab_size: 72 | ast_levels: 18 | nloc: 19 | n_ast_nodes: 288 | n_identifiers: 37 | n_ast_errors: 1 | n_words: 100 | n_whitespaces: 366 | fun_name: set_value
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def set_value(x, value): value = np.asarray(value, dtype=dtype_numpy(x)) if tf.compat.v1.executing_eagerly_outside_functions(): x.assign(value) else: with get_graph().as_default(): tf_dtype = tf.as_dtype(x.dtype.name.split("_")[0]) if hasattr(x, "_assign_placeholder"): assign_placeholder = x._assign_placeholder assign_op = x._assign_op else: # In order to support assigning weights to resizable variables in # Keras, we make a placeholder with the correct number of dimensions # but with None in each dimension. This way, we can assign weights # of any size (as long as they have the correct dimensionality). placeholder_shape = tf.TensorShape([None] * value.ndim) assign_placeholder = tf.compat.v1.placeholder( tf_dtype, shape=placeholder_shape ) assign_op = x.assign(assign_placeholder) x._assign_placeholder = assign_placeholder x._assign_op = assign_op get_session().run(assign_op, feed_dict={assign_placeholder: value}) @keras_export("keras.backend.batch_set_value") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
ast_errors: @keras_export("keras.backend.batch_set_value") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
155
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3

id: 45,150 | vocab_size: 43 | ast_levels: 15 | nloc: 17 | n_ast_nodes: 183 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 64 | n_whitespaces: 237 | fun_name: upsert_document
(AzureCosmosDBHook) Update to latest Cosmos API (#21514) * Bumping the ms azure cosmos providers to work with the 4.x azure python sdk api Co-authored-by: gatewoodb <[email protected]>
https://github.com/apache/airflow.git
def upsert_document(self, document, database_name=None, collection_name=None, document_id=None): # Assign unique ID if one isn't provided if document_id is None: document_id = str(uuid.uuid4()) if document is None: raise AirflowBadRequest("You cannot insert a None document") # Add document id if isn't found if 'id' in document: if document['id'] is None: document['id'] = document_id else: document['id'] = document_id created_document = ( self.get_conn() .get_database_client(self.__get_database_name(database_name)) .get_container_client(self.__get_collection_name(collection_name)) .upsert_item(document) ) return created_document
108
cosmos.py
Python
airflow/providers/microsoft/azure/hooks/cosmos.py
3c4524b4ec2b42a8af0a8c7b9d8f1d065b2bfc83
airflow
5

id: 299,371 | vocab_size: 158 | ast_levels: 14 | nloc: 190 | n_ast_nodes: 1,720 | n_identifiers: 40 | n_ast_errors: 0 | n_words: 466 | n_whitespaces: 2,128 | fun_name: test_group_features
Migrate hue v1 light to color_mode (#69275) * Migrate hue v1 light to color_mode * Fix test * Correct filter_supported_color_modes + add test * Use ColorMode enum
https://github.com/home-assistant/core.git
async def test_group_features(hass, mock_bridge_v1): color_temp_type = "Color temperature light" extended_color_type = "Extended color light" group_response = { "1": { "name": "Group 1", "lights": ["1", "2"], "type": "LightGroup", "action": { "on": True, "bri": 254, "hue": 10000, "sat": 254, "effect": "none", "xy": [0.5, 0.5], "ct": 250, "alert": "select", "colormode": "ct", }, "state": {"any_on": True, "all_on": False}, }, "2": { "name": "Living Room", "lights": ["2", "3"], "type": "Room", "action": { "on": True, "bri": 153, "hue": 4345, "sat": 254, "effect": "none", "xy": [0.5, 0.5], "ct": 250, "alert": "select", "colormode": "ct", }, "state": {"any_on": True, "all_on": False}, }, "3": { "name": "Dining Room", "lights": ["4"], "type": "Room", "action": { "on": True, "bri": 153, "hue": 4345, "sat": 254, "effect": "none", "xy": [0.5, 0.5], "ct": 250, "alert": "select", "colormode": "ct", }, "state": {"any_on": True, "all_on": False}, }, } light_1 = { "state": { "on": True, "bri": 144, "ct": 467, "alert": "none", "effect": "none", "reachable": True, }, "capabilities": { "control": { "colorgamuttype": "A", "colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]], } }, "type": color_temp_type, "name": "Hue Lamp 1", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "456", } light_2 = { "state": { "on": False, "bri": 0, "ct": 0, "alert": "none", "effect": "none", "colormode": "xy", "reachable": True, }, "capabilities": { "control": { "colorgamuttype": "A", "colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]], } }, "type": color_temp_type, "name": "Hue Lamp 2", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "4567", } light_3 = { "state": { "on": False, "bri": 0, "hue": 0, "sat": 0, "xy": [0, 0], "ct": 0, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True, }, "capabilities": { "control": { "colorgamuttype": "A", "colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]], } }, "type": extended_color_type, "name": "Hue Lamp 3", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "123", } light_4 = { "state": { "on": True, "bri": 100, "hue": 13088, "sat": 210, "xy": [0.5, 0.4], "ct": 420, "alert": "none", "effect": "none", "colormode": "hs", "reachable": True, }, "capabilities": { "control": { "colorgamuttype": "A", "colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]], } }, "type": extended_color_type, "name": "Hue Lamp 4", "modelid": "LCT001", "swversion": "66009461", "manufacturername": "Philips", "uniqueid": "1234", } light_response = { "1": light_1, "2": light_2, "3": light_3, "4": light_4, } mock_bridge_v1.mock_light_responses.append(light_response) mock_bridge_v1.mock_group_responses.append(group_response) await setup_bridge(hass, mock_bridge_v1) assert len(mock_bridge_v1.mock_requests) == 2 color_temp_feature = hue_light.SUPPORT_HUE["Color temperature light"] color_temp_mode = sorted(hue_light.COLOR_MODES_HUE["Color temperature light"]) extended_color_feature = hue_light.SUPPORT_HUE["Extended color light"] extended_color_mode = sorted(hue_light.COLOR_MODES_HUE["Extended color light"]) group_1 = hass.states.get("light.group_1") assert group_1.attributes["supported_color_modes"] == color_temp_mode assert group_1.attributes["supported_features"] == color_temp_feature group_2 = hass.states.get("light.living_room") assert group_2.attributes["supported_color_modes"] == extended_color_mode assert 
group_2.attributes["supported_features"] == extended_color_feature group_3 = hass.states.get("light.dining_room") assert group_3.attributes["supported_color_modes"] == extended_color_mode assert group_3.attributes["supported_features"] == extended_color_feature entity_registry = er.async_get(hass) device_registry = dr.async_get(hass) entry = entity_registry.async_get("light.hue_lamp_1") device_entry = device_registry.async_get(entry.device_id) assert device_entry.suggested_area is None entry = entity_registry.async_get("light.hue_lamp_2") device_entry = device_registry.async_get(entry.device_id) assert device_entry.suggested_area == "Living Room" entry = entity_registry.async_get("light.hue_lamp_3") device_entry = device_registry.async_get(entry.device_id) assert device_entry.suggested_area == "Living Room" entry = entity_registry.async_get("light.hue_lamp_4") device_entry = device_registry.async_get(entry.device_id) assert device_entry.suggested_area == "Dining Room"
1,012
test_light_v1.py
Python
tests/components/hue/test_light_v1.py
573e966d74221641b13e3530fcf60240da6596be
core
1

id: 270,132 | vocab_size: 61 | ast_levels: 15 | nloc: 25 | n_ast_nodes: 351 | n_identifiers: 27 | n_ast_errors: 0 | n_words: 90 | n_whitespaces: 194 | fun_name: load_data
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def load_data(path="boston_housing.npz", test_split=0.2, seed=113): assert 0 <= test_split < 1 origin_folder = ( "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" ) path = get_file( path, origin=origin_folder + "boston_housing.npz", file_hash="f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5", ) with np.load( path, allow_pickle=True ) as f: # pylint: disable=unexpected-keyword-arg x = f["x"] y = f["y"] rng = np.random.RandomState(seed) indices = np.arange(len(x)) rng.shuffle(indices) x = x[indices] y = y[indices] x_train = np.array(x[: int(len(x) * (1 - test_split))]) y_train = np.array(y[: int(len(x) * (1 - test_split))]) x_test = np.array(x[int(len(x) * (1 - test_split)) :]) y_test = np.array(y[int(len(x) * (1 - test_split)) :]) return (x_train, y_train), (x_test, y_test)
219
boston_housing.py
Python
keras/datasets/boston_housing.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1

id: 119,699 | vocab_size: 91 | ast_levels: 17 | nloc: 27 | n_ast_nodes: 451 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 200 | n_whitespaces: 384 | fun_name: mock_devices
Improve TPU v2 and v3 mesh_utils.create_device_mesh logic. * Fixes a bug when a non-3D mesh was requested * Adds new logic when requesting a single-host mesh * Extends logic to v2 as well as v3
https://github.com/google/jax.git
def mock_devices(x, y, z, dev_kind, two_cores_per_chip): devices = [] process_index = 0 for k in range(z): for j in range(0, y, 2): for i in range(0, x, 2): # Local 2x2 subgrid of chips, with 2 cores per chip. host_devices = [ MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i, j, k), 0), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i, j, k), 1), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i + 1, j, k), 0), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i + 1, j, k), 1), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i, j + 1, k), 0), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i, j + 1, k), 1), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i + 1, j + 1, k), 0), MockTpuDevice(-1, 'tpu', dev_kind, process_index, (i + 1, j + 1, k), 1), ] if two_cores_per_chip: # Only include core_on_chip = 0. host_devices = host_devices[::2] devices.extend(host_devices) # Simulate one process per host (1 host = 2x2x1 slice) process_index += 1 # id grows in (z, y, x) major order for d in devices: i, j, k = d.coords d.id = k*x*y + j*x + i if not two_cores_per_chip: d.id = d.id * 2 + d.core_on_chip _validate_mocked_process_indices(devices, two_cores_per_chip) return devices # If this function raises, it's a bug in the test code!
322
mesh_utils_test.py
Python
tests/mesh_utils_test.py
bcee442390e0dfbbe078493af0314b515fff97cc
jax
7

id: 168,195 | vocab_size: 117 | ast_levels: 16 | nloc: 38 | n_ast_nodes: 367 | n_identifiers: 43 | n_ast_errors: 0 | n_words: 155 | n_whitespaces: 615 | fun_name: transform_dict_like
PERF cache find_stack_level (#48023) cache stacklevel
https://github.com/pandas-dev/pandas.git
def transform_dict_like(self, func): from pandas.core.reshape.concat import concat obj = self.obj args = self.args kwargs = self.kwargs # transform is currently only for Series/DataFrame assert isinstance(obj, ABCNDFrame) if len(func) == 0: raise ValueError("No transform functions were provided") func = self.normalize_dictlike_arg("transform", obj, func) results: dict[Hashable, DataFrame | Series] = {} failed_names = [] all_type_errors = True for name, how in func.items(): colg = obj._gotitem(name, ndim=1) try: results[name] = colg.transform(how, 0, *args, **kwargs) except Exception as err: if str(err) in { "Function did not transform", "No transform functions were provided", }: raise err else: if not isinstance(err, TypeError): all_type_errors = False failed_names.append(name) # combine results if not results: klass = TypeError if all_type_errors else ValueError raise klass("Transform function failed") if len(failed_names) > 0: warnings.warn( f"{failed_names} did not transform successfully. If any error is " f"raised, this will raise in a future version of pandas. " f"Drop these columns/ops to avoid this warning.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) return concat(results, axis=1)
227
apply.py
Python
pandas/core/apply.py
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
9

id: 92,049 | vocab_size: 15 | ast_levels: 10 | nloc: 46 | n_ast_nodes: 63 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 18 | n_whitespaces: 57 | fun_name: set_logged_in
feat(SU modal) : Improved superuser modal flow when user has an expired sso session (#35553)
https://github.com/getsentry/sentry.git
def set_logged_in(self, user, prefilled_su_modal=None, current_datetime=None): request = self.request if current_datetime is None: current_datetime = timezone.now() token = get_random_string(12)
244
superuser.py
Python
src/sentry/auth/superuser.py
05ffe4df7f0018cb0990fbd25fc838d0187ccca5
sentry
8

id: 127,694 | vocab_size: 9 | ast_levels: 8 | nloc: 4 | n_ast_nodes: 39 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 10 | n_whitespaces: 38 | fun_name: node_id
[core/docs] Update worker docstring (#28495) Co-authored-by: Philipp Moritz <[email protected]>
https://github.com/ray-project/ray.git
def node_id(self): node_id = self.worker.current_node_id assert not node_id.is_nil() return node_id
22
runtime_context.py
Python
python/ray/runtime_context.py
8ffe435173aee0f313f786e7301d05f608b6d5be
ray
1

id: 291,293 | vocab_size: 9 | ast_levels: 9 | nloc: 2 | n_ast_nodes: 31 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 15 | fun_name: test_get_rpc_channel_name
Add Shelly tests coverage (#82642) * Add Shelly tests coverage * Review comments * Remove leftovers
https://github.com/home-assistant/core.git
async def test_get_rpc_channel_name(mock_rpc_device): assert get_rpc_channel_name(mock_rpc_device, "input:0") == "test switch_0"
15
test_utils.py
Python
tests/components/shelly/test_utils.py
1e68e8c4b4836c9aabe5451426053428b2af905c
core
1

id: 134,166 | vocab_size: 48 | ast_levels: 12 | nloc: 11 | n_ast_nodes: 135 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 60 | n_whitespaces: 192 | fun_name: collect
Fix metrics exporter exporting metrics in incorrect format (#29488) Signed-off-by: Alan Guo [email protected] Ray was using prometheus client wrong a few ways: We were registering the Collector to the RegistryCollector multiple times The collector was exporting a new "metric" for each tag combination instead of using a single Metric with multiple samples. We were creating a new RegistryCollector that was unused instead of re-using the "REGISTRY" singleton
https://github.com/ray-project/ray.git
def collect(self): # pragma: NO COVER # Make a shallow copy of self._view_name_to_data_map, to avoid seeing # concurrent modifications when iterating through the dictionary. metrics_map = {} for v_name, view_data in self._view_name_to_data_map.copy().items(): if v_name not in self.registered_views: continue desc = self.registered_views[v_name] for tag_values in view_data.tag_value_aggregation_data_map: agg_data = view_data.tag_value_aggregation_data_map[tag_values] metric = self.to_metric(desc, tag_values, agg_data, metrics_map) for metric in metrics_map.values(): yield metric
84
prometheus_exporter.py
Python
python/ray/_private/prometheus_exporter.py
05ea05d05659eb2bf89ab374f6df67c5573bd4d9
ray
5

id: 246,084 | vocab_size: 51 | ast_levels: 12 | nloc: 7 | n_ast_nodes: 100 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 65 | n_whitespaces: 178 | fun_name: _store_rejected_events_txn
Add `state_key` and `rejection_reason` to `events` (#11792) ... and start populating them for new events
https://github.com/matrix-org/synapse.git
def _store_rejected_events_txn(self, txn, events_and_contexts): # Remove the rejected events from the list now that we've added them # to the events table and the events_json table. to_remove = set() for event, context in events_and_contexts: if context.rejected: # Insert the event_id into the rejections table # (events.rejection_reason has already been done) self._store_rejections_txn(txn, event.event_id, context.rejected) to_remove.add(event) return [ec for ec in events_and_contexts if ec[0] not in to_remove]
63
events.py
Python
synapse/storage/databases/main/events.py
2aa37a4250675f6d9feb57ec0dce65b2a6a3cde6
synapse
5

id: 210,789 | vocab_size: 28 | ast_levels: 10 | nloc: 10 | n_ast_nodes: 186 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 45 | n_whitespaces: 119 | fun_name: resize_pos_embed
add vit, adamw_ld (#6059) * add vit, adamw_ld * update
https://github.com/PaddlePaddle/PaddleDetection.git
def resize_pos_embed(self, pos_embed, old_hw, new_hw): cls_pos_embed = pos_embed[:, :1, :] pos_embed = pos_embed[:, 1:, :] pos_embed = pos_embed.transpose([0, 2, 1]) pos_embed = pos_embed.reshape([1, -1, old_hw[0], old_hw[1]]) pos_embed = F.interpolate( pos_embed, new_hw, mode='bicubic', align_corners=False) pos_embed = pos_embed.flatten(2).transpose([0, 2, 1]) pos_embed = paddle.concat([cls_pos_embed, pos_embed], axis=1) return pos_embed
126
vision_transformer.py
Python
ppdet/modeling/backbones/vision_transformer.py
63e7cfa414f67fc7f7cc1117325a0026d7721aab
PaddleDetection
1

id: 259,210 | vocab_size: 146 | ast_levels: 20 | nloc: 79 | n_ast_nodes: 671 | n_identifiers: 45 | n_ast_errors: 0 | n_words: 259 | n_whitespaces: 1,645 | fun_name: _compute_drop_idx
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _compute_drop_idx(self): if self.drop is None: return None elif isinstance(self.drop, str): if self.drop == "first": return np.zeros(len(self.categories_), dtype=object) elif self.drop == "if_binary": n_features_out_no_drop = [len(cat) for cat in self.categories_] if self._infrequent_enabled: for i, infreq_idx in enumerate(self._infrequent_indices): if infreq_idx is None: continue n_features_out_no_drop[i] -= infreq_idx.size - 1 return np.array( [ 0 if n_features_out == 2 else None for n_features_out in n_features_out_no_drop ], dtype=object, ) else: msg = ( "Wrong input for parameter `drop`. Expected " "'first', 'if_binary', None or array of objects, got {}" ) raise ValueError(msg.format(type(self.drop))) else: try: drop_array = np.asarray(self.drop, dtype=object) droplen = len(drop_array) except (ValueError, TypeError): msg = ( "Wrong input for parameter `drop`. Expected " "'first', 'if_binary', None or array of objects, got {}" ) raise ValueError(msg.format(type(drop_array))) if droplen != len(self.categories_): msg = ( "`drop` should have length equal to the number " "of features ({}), got {}" ) raise ValueError(msg.format(len(self.categories_), droplen)) missing_drops = [] drop_indices = [] for feature_idx, (drop_val, cat_list) in enumerate( zip(drop_array, self.categories_) ): if not is_scalar_nan(drop_val): drop_idx = np.where(cat_list == drop_val)[0] if drop_idx.size: # found drop idx drop_indices.append( self._map_drop_idx_to_infrequent(feature_idx, drop_idx[0]) ) else: missing_drops.append((feature_idx, drop_val)) continue # drop_val is nan, find nan in categories manually for cat_idx, cat in enumerate(cat_list): if is_scalar_nan(cat): drop_indices.append( self._map_drop_idx_to_infrequent(feature_idx, cat_idx) ) break else: # loop did not break thus drop is missing missing_drops.append((feature_idx, drop_val)) if any(missing_drops): msg = ( "The following categories were supposed to be " "dropped, but were not found in the training " "data.\n{}".format( "\n".join( [ "Category: {}, Feature: {}".format(c, v) for c, v in missing_drops ] ) ) ) raise ValueError(msg) return np.array(drop_indices, dtype=object)
411
_encoders.py
Python
sklearn/preprocessing/_encoders.py
7f0006c8aad1a09621ad19c3db19c3ff0555a183
scikit-learn
20

id: 181,812 | vocab_size: 6 | ast_levels: 6 | nloc: 3 | n_ast_nodes: 24 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 13 | fun_name: _gen_grow_safe
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def _gen_grow_safe(self, pset, min_, max_, type_=None):
33
base.py
Python
tpot/base.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
1

id: 261,237 | vocab_size: 61 | ast_levels: 12 | nloc: 24 | n_ast_nodes: 317 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 91 | n_whitespaces: 216 | fun_name: weighted_mode
DOC Ensures that sklearn.utils.extmath.weighted_mode passes numpydoc validation (#24571) Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def weighted_mode(a, w, *, axis=0): if axis is None: a = np.ravel(a) w = np.ravel(w) axis = 0 else: a = np.asarray(a) w = np.asarray(w) if a.shape != w.shape: w = np.full(a.shape, w, dtype=w.dtype) scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape) oldcounts = np.zeros(testshape) for score in scores: template = np.zeros(a.shape) ind = a == score template[ind] = w[ind] counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return mostfrequent, oldcounts
203
extmath.py
Python
sklearn/utils/extmath.py
c674e589f9aa19ebd1151c19413622f96c8ed368
scikit-learn
4

id: 64,494 | vocab_size: 8 | ast_levels: 11 | nloc: 28 | n_ast_nodes: 48 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 6 | fun_name: get_data
fix: Get MRs that are yet to be received but fully ordered in Report - Remove incorrect query clause that only check if ordered qty < 100 - MR should be visible in report until fully received (cycle complete)
https://github.com/frappe/erpnext.git
def get_data(filters, conditions): data = frappe.db.sql(.format(conditions=conditions), as_dict=1) return data
30
requested_items_to_order_and_receive.py
Python
erpnext/buying/report/requested_items_to_order_and_receive/requested_items_to_order_and_receive.py
d3b0ca30c6ae0e979b7bdddbe67018941be8d59b
erpnext
1

id: 260,537 | vocab_size: 38 | ast_levels: 12 | nloc: 13 | n_ast_nodes: 147 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 44 | n_whitespaces: 159 | fun_name: transform
MAINT parameter validation for CountVectorizer & TfidfVectorizer (#23853) Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def transform(self, X): if isinstance(X, str): raise ValueError( "Iterable over raw text documents expected, string object received." ) self._validate_ngram_range() analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X
91
text.py
Python
sklearn/feature_extraction/text.py
c300a8f2178fcae847f82ad548fe9452f2ba8bbb
scikit-learn
5

id: 35,532 | vocab_size: 6 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 43 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 27 | fun_name: test_causal_lm_model_as_decoder
Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for pask_key_values for TFElectra model Co-authored-by: ydshieh <[email protected]>
https://github.com/huggingface/transformers.git
def test_causal_lm_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_causal_lm_model_as_decoder(*config_and_inputs)
24
test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
Python
templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py
8635407bc724c45142c1f91dbc9ef3ea681e1a56
transformers
1

id: 144,511 | vocab_size: 11 | ast_levels: 9 | nloc: 7 | n_ast_nodes: 86 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 60 | fun_name: testAsyncSave
[tune] Single wait refactor. (#21852) This is a down scoped change. For the full overview picture of Tune control loop, see [`Tune control loop refactoring`](https://docs.google.com/document/d/1RDsW7SVzwMPZfA0WLOPA4YTqbRyXIHGYmBenJk33HaE/edit#heading=h.2za3bbxbs5gn) 1. Previously there are separate waits on pg ready and other events. As a result, there are quite a few timing tweaks that are inefficient, hard to understand and unit test. This PR consolidates into a single wait that is handled by TrialRunner in each step. - A few event types are introduced, and their mapping into scenarios * PG_READY --> Should place a trial onto it. If somehow there is no trial to be placed there, the pg will be put in _ready momentarily. This is due to historically resources is conceptualized as a pull based model. * NO_RUNNING_TRIALS_TIME_OUT --> possibly not sufficient resources case * TRAINING_RESULT * SAVING_RESULT * RESTORING_RESULT * YIELD --> This just means that simply taking very long to train. We need to punt back to the main loop to print out status info etc. 2. Previously TrialCleanup is not very efficient and can be racing between Trainable.stop() and `return_placement_group`. This PR streamlines the Trial cleanup process by explicitly let Trainable.stop() to finish followed by `return_placement_group(pg)`. Note, graceful shutdown is needed in cases like `pause_trial` where checkpointing to memory needs to be given the time to happen before the actor is gone. 3. There are quite some env variables removed (timing tweaks), that I consider OK to proceed without deprecation cycle.
https://github.com/ray-project/ray.git
def testAsyncSave(self):
    trial = Trial("__fake")
    self._simulate_starting_trial(trial)
    self._simulate_getting_result(trial)
    self._simulate_saving(trial)
    self.trial_executor.stop_trial(trial)
    self.assertEqual(Trial.TERMINATED, trial.status)
50
test_ray_trial_executor.py
Python
python/ray/tune/tests/test_ray_trial_executor.py
323511b716416088859967686c71889ef8425204
ray
1
199,970
14
17
6
185
9
0
15
69
phase_retarder
removed backticks around variable names in docs according to PR review
https://github.com/sympy/sympy.git
def phase_retarder(theta=0, delta=0):
    R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2,
                 (1-exp(I*delta))*cos(theta)*sin(theta)],
                [(1-exp(I*delta))*cos(theta)*sin(theta),
                 sin(theta)**2 + exp(I*delta)*cos(theta)**2]])
    return R*exp(-I*delta/2)
118
polarization.py
Python
sympy/physics/optics/polarization.py
ae2baaa0bbcd42792bb2e7887ca61b97abc40463
sympy
1
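A brief usage sketch (not part of the dataset row, assuming SymPy's optics polarization module from the path above): the Jones matrix returned by phase_retarder reduces to a quarter-wave plate when delta = pi/2.

# Illustrative only; requires a standard SymPy installation.
from sympy import pi, pprint, simplify
from sympy.physics.optics.polarization import phase_retarder

# delta = pi/2 gives a quarter-wave plate; theta is the fast-axis angle.
qwp = phase_retarder(theta=0, delta=pi / 2)
pprint(simplify(qwp))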
259,552
81
13
30
453
37
1
129
261
test_ridge_regression
TST tight and clean tests for Ridge (#22910) * MNT replace pinvh by solve * DOC more info for svd solver * TST rewrite test_ridge * MNT remove test_ridge_singular * MNT restructure into several tests * MNT remove test_toy_ridge_object * MNT remove test_ridge_sparse_svd This is tested in test_ridge_fit_intercept_sparse_error. * TST exclude cholesky from singular problem * CLN two fixes * MNT parametrize test_ridge_sample_weights * MNT restructure test_ridge_sample_weights * CLN tighten tolerance for sag solver * CLN try to fix saga tolerance * CLN make test_ridge_sample_weights nicer * MNT remove test_ridge_regression_sample_weights * MNT rename to test_ridge_regression_sample_weights * CLN make test_ridge_regression_unpenalized pass for all random seeds * CLN make tests pass for all random seeds * DOC fix typos * TST skip cholesky for singular problems * MNT move up test_ridge_regression_sample_weights * CLN set skip reason as comment
https://github.com/scikit-learn/scikit-learn.git
def test_ridge_regression(solver, fit_intercept, ols_ridge_dataset, global_random_seed):
    X, y, _, coef = ols_ridge_dataset
    alpha = 1.0  # because ols_ridge_dataset uses this.
    params = dict(
        alpha=alpha,
        fit_intercept=True,
        solver=solver,
        tol=1e-15 if solver in ("sag", "saga") else 1e-10,
        random_state=global_random_seed,
    )

    # Calculate residuals and R2.
    res_null = y - np.mean(y)
    res_Ridge = y - X @ coef
    R2_Ridge = 1 - np.sum(res_Ridge**2) / np.sum(res_null**2)

    model = Ridge(**params)
    X = X[:, :-1]  # remove intercept
    if fit_intercept:
        intercept = coef[-1]
    else:
        X = X - X.mean(axis=0)
        y = y - y.mean()
        intercept = 0
    model.fit(X, y)
    coef = coef[:-1]

    assert model.intercept_ == pytest.approx(intercept)
    assert_allclose(model.coef_, coef)
    assert model.score(X, y) == pytest.approx(R2_Ridge)

    # Same with sample_weight.
    model = Ridge(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
    assert model.intercept_ == pytest.approx(intercept)
    assert_allclose(model.coef_, coef)
    assert model.score(X, y) == pytest.approx(R2_Ridge)


@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
270
test_ridge.py
Python
sklearn/linear_model/tests/test_ridge.py
6528e14085d059f9d0c94f93378e7e3c0b967f27
scikit-learn
3
19,871
22
10
7
60
7
0
25
75
get_file_to_edit
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def get_file_to_edit(self) -> Optional[str]:
    assert self.load_only is not None, "Need to be specified a file to be editing"

    try:
        return self._get_parser_to_modify()[0]
    except IndexError:
        return None
36
configuration.py
Python
pipenv/patched/notpip/_internal/configuration.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
2
171,449
47
14
20
166
21
0
64
201
_set_group_selection
BUG: groupby.describe with as_index=False incorrect (#49643) * BUG: groupby.describe with as_index=False incorrect * Add test for two groupings * Simplify logic
https://github.com/pandas-dev/pandas.git
def _set_group_selection(self) -> None:
    # This is a no-op for SeriesGroupBy
    grp = self.grouper
    if not (
        grp.groupings is not None
        and self.obj.ndim > 1
        and self._group_selection is None
    ):
        return

    groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
    if len(groupers):
        # GH12839 clear selected obj cache when group selection changes
        ax = self.obj._info_axis
        self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
        self._reset_cache("_selected_obj")
102
groupby.py
Python
pandas/core/groupby/groupby.py
68e2c2ae8b714bc9bcbdf9e98793bb681048273a
pandas
8
210,791
29
15
10
151
14
0
42
88
layerwise_lr_decay
add vit, adamw_ld (#6059) * add vit, adamw_ld * update
https://github.com/PaddlePaddle/PaddleDetection.git
def layerwise_lr_decay(decay_rate, name_dict, n_layers, param):
    ratio = 1.0
    static_name = name_dict[param.name]
    if "blocks" in static_name:
        idx = static_name.find("blocks.")
        layer = int(static_name[idx:].split(".")[1])
        ratio = decay_rate**(n_layers - layer)
    elif "cls_token" in static_name or 'patch_embed' in static_name:
        ratio = decay_rate**(n_layers + 1)
    param.optimize_attr["learning_rate"] *= ratio
92
adamw.py
Python
ppdet/optimizer/adamw.py
63e7cfa414f67fc7f7cc1117325a0026d7721aab
PaddleDetection
4
186,686
10
10
8
53
6
0
10
46
ensure_augeas_state
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
https://github.com/certbot/certbot.git
def ensure_augeas_state(self) -> None:
    if self.unsaved_files():
        self.configurator.save_notes += "(autosave)"
        self.configurator.save()
29
parser.py
Python
certbot-apache/certbot_apache/_internal/parser.py
7d9e9a49005de7961e84d2a7c608db57dbab3046
certbot
2
35,049
19
11
6
82
14
0
22
65
overflow_fallback
Upgrade black to version ~=22.0 (#15565) * Upgrade black to version ~=22.0 * Check copies * Fix code
https://github.com/huggingface/transformers.git
def overflow_fallback(self, y_int):
    self.set_shift(y_int)  # adjusts `self.shift`
    y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
    y_sq_int = y_int_shifted**2
    var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
    return var_int
51
quant_modules.py
Python
src/transformers/models/ibert/quant_modules.py
7732d0fe7a759c9844215920e9f1c5540eafb1a6
transformers
1
46,604
17
16
7
97
12
0
20
102
_discover_secrets_backends
Suppress import errors for providers from sources (#22579) When we are running airflow locally with providers installed from sources, often many providers will be discovered which we haven't installed the deps for. This generally results in a very large amount of traceback logging, which has a very negative effect on usefulness of terminal output. Here we suppress this error logging for providers that are installed from sources.
https://github.com/apache/airflow.git
def _discover_secrets_backends(self) -> None:
    for provider_package, provider in self._provider_dict.items():
        if provider.data.get("secrets-backends"):
            for secrets_backends_class_name in provider.data["secrets-backends"]:
                if _sanity_check(provider_package, secrets_backends_class_name, provider):
                    self._secrets_backend_class_name_set.add(secrets_backends_class_name)
59
providers_manager.py
Python
airflow/providers_manager.py
b5a786b38148295c492da8ab731d5e2f6f86ccf7
airflow
5
209,829
15
10
3
47
5
0
17
53
availablemodes
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <[email protected]>
https://github.com/secdev/scapy.git
def availablemodes(self):
    # type: () -> List[str]
    # According to https://nmap.org/npcap/guide/npcap-devguide.html#npcap-feature-dot11  # noqa: E501
    self._check_npcap_requirement()
    return self._npcap_get("modes").split(",")
23
__init__.py
Python
scapy/arch/windows/__init__.py
a2b7a28faff1db058dd22ce097a268e0ad5d1d33
scapy
1
261,639
33
10
9
120
8
0
51
92
_safe_assign
MAINT test globally setting output via context manager (#24932) Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
    row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
    column_indexer = (
        slice(None, None, None) if column_indexer is None else column_indexer
    )
    if hasattr(X, "iloc"):  # pandas dataframe
        X.iloc[row_indexer, column_indexer] = values
    else:  # numpy array or sparse matrix
        X[row_indexer, column_indexer] = values
80
__init__.py
Python
sklearn/utils/__init__.py
af16e5934ae269d05fd7df983b97def7c0ef0bd2
scikit-learn
4
5,829
44
14
18
149
10
0
71
185
verify_liking
Updated Instagram xpaths. Added (#6649) Co-authored-by: RDC Projects <[email protected]>
https://github.com/InstaPy/InstaPy.git
def verify_liking(browser, maximum, minimum, logger):
    post_page = get_additional_data(browser)
    likes_count = post_page["items"][0]["like_count"]
    if not likes_count:
        likes_count = 0

    if maximum is not None and likes_count > maximum:
        logger.info(
            "Not liked this post! ~more likes exist off maximum limit at "
            "{}".format(likes_count)
        )
        return False
    elif minimum is not None and likes_count < minimum:
        logger.info(
            "Not liked this post! ~less likes exist off minimum limit "
            "at {}".format(likes_count)
        )
        return False
    return True
87
like_util.py
Python
instapy/like_util.py
f0f568e5b89952d1609f69a7820d80f1d34b45ad
InstaPy
6
248,625
12
9
8
88
9
0
16
51
test_first_upgrade_does_not_block_second
Add more tests for room upgrades (#13074) Signed-off-by: Sean Quah <[email protected]>
https://github.com/matrix-org/synapse.git
def test_first_upgrade_does_not_block_second(self) -> None:
    channel = self._upgrade_room(self.other_token)
    self.assertEqual(403, channel.code, channel.result)

    channel = self._upgrade_room(expire_cache=False)
    self.assertEqual(200, channel.code, channel.result)
56
test_upgrade_room.py
Python
tests/rest/client/test_upgrade_room.py
99d3931974e65865d1102ee79d7b7e2b017a3180
synapse
1
257,656
28
10
6
102
12
0
30
79
test_query_by_embedding_excluded_meta_data_return_embedding_true
Use opensearch-py in OpenSearchDocumentStore (#2691) * add Opensearch extras * let OpenSearchDocumentStore use opensearch-py * Update Documentation & Code Style * fix a bug found after adding tests Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <[email protected]>
https://github.com/deepset-ai/haystack.git
def test_query_by_embedding_excluded_meta_data_return_embedding_true(self, mocked_document_store):
    mocked_document_store.return_embedding = True
    mocked_document_store.excluded_meta_data = ["foo", "embedding"]
    mocked_document_store.query_by_embedding(self.query_emb)
    _, kwargs = mocked_document_store.client.search.call_args
    # we expect "embedding" was removed from the final query
    assert kwargs["body"]["_source"] == {"excludes": ["foo"]}
57
test_opensearch.py
Python
test/document_stores/test_opensearch.py
e7627c3f8b241654b61f8523479c81f855102f0a
haystack
1
166,353
62
13
30
325
34
0
106
262
write_to_compressed
ENH: add support for reading .tar archives (#44787) * Add reproduction test for .tar.gz archives co-authored-by: Margarete Dippel <[email protected]> * add support for .tar archives python's `tarfile` supports gzip, xz and bz2 encoding, so we don't need to make any special cases for that. co-authored-by: Margarete Dippel <[email protected]> * update doc comments * fix: pep8 errors * refactor: flip _compression_to_extension around to support multiple extensions on same compression co-authored-by: Margarete Dippel <[email protected] y.github.com> * refactor: detect tar files using existing extension mapping co-authored-by: Margarete Dippel <[email protected]> * feat: add support for writing tar files co-authored-by: Margarete Dippel <[email protected]> * feat: assure it respects .gz endings * feat: add "tar" entry to compressionoptions * chore: add whatsnew entry * fix: test_compression_size_fh * add tarfile to shared compression docs * fix formatting * pass through "mode" via compression args * fix pickle test * add class comment * sort imports * add _compression_to_extension back for backwards compatibility * fix some type warnings * fix: formatting * fix: mypy complaints * fix: more tests * fix: some error with xml * fix: interpreted text role * move to v1.5 whatsnw * add versionadded note * don't leave blank lines * add tests for zero files / multiple files * move _compression_to_extension to tests * revert added "mode" argument * add test to ensure that `compression.mode` works * compare strings, not bytes * replace carriage returns Co-authored-by: Margarete Dippel <[email protected]>
https://github.com/pandas-dev/pandas.git
def write_to_compressed(compression, path, data, dest="test"):
    args: tuple[Any, ...] = (data,)
    mode = "wb"
    method = "write"
    compress_method: Callable

    if compression == "zip":
        compress_method = zipfile.ZipFile
        mode = "w"
        args = (dest, data)
        method = "writestr"
    elif compression == "tar":
        compress_method = tarfile.TarFile
        mode = "w"
        file = tarfile.TarInfo(name=dest)
        bytes = io.BytesIO(data)
        file.size = len(data)
        args = (file, bytes)
        method = "addfile"
    elif compression == "gzip":
        compress_method = gzip.GzipFile
    elif compression == "bz2":
        compress_method = bz2.BZ2File
    elif compression == "zstd":
        compress_method = import_optional_dependency("zstandard").open
    elif compression == "xz":
        compress_method = get_lzma_file()
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")

    with compress_method(path, mode=mode) as f:
        getattr(f, method)(*args)


# ------------------------------------------------------------------
# Plotting
181
_io.py
Python
pandas/_testing/_io.py
864729813a0203af8bb0d30b6c883588ae2c96f8
pandas
7
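A quick standard-library sketch (not part of the dataset row) of what the gzip branch above amounts to: open a gzip container in binary-write mode and write raw bytes into it.

# Illustrative only; uses only the Python standard library.
import gzip

with gzip.GzipFile("example.csv.gz", mode="wb") as f:
    f.write(b"a,b\n1,2\n")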
310,960
41
13
16
212
21
0
55
191
extra_state_attributes
Add data update coordinator to Whois (#64846) Co-authored-by: Joakim Sørensen <[email protected]>
https://github.com/home-assistant/core.git
def extra_state_attributes(self) -> dict[str, int | float | None] | None:
    # Only add attributes to the original sensor
    if self.entity_description.key != "days_until_expiration":
        return None

    if self.coordinator.data is None:
        return None

    attrs = {
        ATTR_EXPIRES: self.coordinator.data.expiration_date.isoformat(),
    }
    if self.coordinator.data.name_servers:
        attrs[ATTR_NAME_SERVERS] = " ".join(self.coordinator.data.name_servers)
    if self.coordinator.data.last_updated:
        attrs[ATTR_UPDATED] = self.coordinator.data.last_updated.isoformat()
    if self.coordinator.data.registrar:
        attrs[ATTR_REGISTRAR] = self.coordinator.data.registrar
    return attrs
133
sensor.py
Python
homeassistant/components/whois/sensor.py
d15d081646c26d32f860d8f84b4f29d848dab148
core
6
43,244
9
6
11
23
4
0
9
30
get_conn
fix: RedshiftDataHook and RdsHook not use cached connection (#24387)
https://github.com/apache/airflow.git
def get_conn(self) -> BaseAwsConnection:
    # Compat shim
    return self.conn
12
base_aws.py
Python
airflow/providers/amazon/aws/hooks/base_aws.py
796e0a0b525def2f24d41fc0b5f4dfbe40b29e9e
airflow
1
119,831
155
17
54
700
50
1
293
463
polyfit
lax_numpy: move poly functions into numpy.polynomial
https://github.com/google/jax.git
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    _check_arraylike("polyfit", x, y)
    deg = core.concrete_or_error(int, deg, "deg must be int")
    order = deg + 1
    # check arguments
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps
    rcond = core.concrete_or_error(float, rcond, "rcond must be float")

    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y

    # apply weighting
    if w is not None:
        _check_arraylike("polyfit", w)
        w, = _promote_dtypes_inexact(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, np.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, np.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale[np.newaxis,:]
    c, resids, rank, s = linalg.lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = linalg.inv(dot(lhs.T, lhs))
        Vbase /= outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            fac = resids / (len(x) - order)
            fac = fac[0]  # making np.array() of shape (1,) to int
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:, :, np.newaxis] * fac
    else:
        return c


_POLY_DOC =


@_wraps(np.poly, lax_description=_POLY_DOC)
@jit
@_wraps(np.poly, lax_description=_POLY_DOC) @jit
424
polynomial.py
Python
jax/_src/numpy/polynomial.py
603bb3c5ca288674579211e64fa47c6b2b0fb7a6
jax
17
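An illustrative usage sketch (not part of the dataset row, assuming jax.numpy exposes polyfit as in the path above): a least-squares fit of a quadratic recovers its coefficients, highest degree first.

# Illustrative only; requires a standard JAX installation.
import jax.numpy as jnp

x = jnp.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = 2.0 * x**2 - 3.0 * x + 1.0      # exact quadratic, so the fit should recover it
coeffs = jnp.polyfit(x, y, deg=2)   # highest-degree coefficient first
print(coeffs)                       # approximately [ 2. -3.  1.]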
195,108
117
18
42
437
49
0
150
639
select_paths
Add CUDA Kernel for TreeSearch Ngram Blocking (#4633) * add cuda and cpp code for ngram blocking * add python wrapper * modify agent to use cuda kernel for self-blocking * add context blocking * change load paths * add ninja to requirement * modify setup script to install kernel ahead of time * change circleci test to use gpu to build website * change back to JIT, switch directory when loadingcuda moddule * add check for cuda * get rid of ninja * remove unused param * move "hyps to cuda" into _block_ngrams() * set gpu_beam_blocking as attribute for TreeSearch, modify block_list function to cast into list, set current ngram_size for context blocking, move path to cpu when needed * fix lint formatting issues * add init file to new folders * add new line at end of file * new lint errors * add ninja * set protobuf * cast tensor to list in to pass gpu tests * debug long gpu tests * fix pointer bug in kernel code and change ngram_size param * add gpu unit tests and fix torch warning * skip gpu test unless cuda enabled * use tolist() for conversion * get rid of context's conversion to list, add check data before kernel code * Revert "get rid of context's conversion to list, add check data before kernel code" This reverts commit 9af834a435bcefb9bd2a049219fe078b7e62e9fd. * replace tensor with list for cpu code to make faster * remove unused import * change botocore version * change botocore again * Revert "change botocore again" This reverts commit a73241c06586015c7c38897fe7aea26e9bca7f16. * Revert "change botocore version" This reverts commit 38c8d97aabebc20b109995b1f0413baefe75fc26. * modify pacer set_batch_context * remove comments and outdated changes * add comments and copyright headers * format c++ and cu file
https://github.com/facebookresearch/ParlAI.git
def select_paths(self, logprobs, prior_scores, current_length) -> _PathSelection:
    # if numel is 1, then this is the first time step, only one hyp is expanded
    if prior_scores.numel() == 1:
        logprobs = logprobs[0:1]

    # beam search actually looks over all hypotheses together so we flatten
    beam_scores = logprobs + prior_scores.unsqueeze(1).expand_as(logprobs)
    flat_beam_scores = beam_scores.view(-1)
    best_scores, best_idxs = torch.topk(flat_beam_scores, self.beam_size, dim=-1)
    voc_size = logprobs.size(-1)

    # get the backtracking hypothesis id as a multiple of full voc_sizes
    hyp_ids = torch.div(best_idxs, voc_size, rounding_mode='trunc')
    # get the actual word id from residual of the same division
    tok_ids = best_idxs % voc_size

    token_details: Optional[List[_PathSelectionTokenDetails]] = None
    if self.verbose:
        probs = torch.softmax(logprobs, dim=-1)
        tok_probs = (
            torch.index_select(probs, 0, hyp_ids)
            .gather(1, tok_ids.unsqueeze(1))
            .view(-1)
        )
        tok_ranks = (
            probs.argsort(1, descending=True)
            .argsort(1)
            .view(-1)
            .gather(0, best_idxs)
        )

        token_details = []

        for tok_logprob, tok_rank in zip(
            tok_probs.log().cpu().numpy(), tok_ranks.cpu().numpy()
        ):
            token_details.append(
                {
                    "token_logprob": tok_logprob.item(),
                    "token_rank": int(tok_rank.item()),
                }
            )

    return _PathSelection(
        hypothesis_ids=hyp_ids,
        token_ids=tok_ids,
        scores=best_scores,
        token_details=token_details,
    )
277
torch_generator_agent.py
Python
parlai/core/torch_generator_agent.py
dff9aabb5024c30c81e146cebffbc88bc6431b61
ParlAI
4
163,416
26
15
11
119
14
0
33
138
_format_attrs
REF: improve rendering of categories in CategoricalIndex (#45340)
https://github.com/pandas-dev/pandas.git
def _format_attrs(self):
    attrs: list[tuple[str, str | int | bool | None]]

    attrs = [
        (
            "categories",
            "[" + ", ".join(self._data._repr_categories()) + "]",
        ),
        ("ordered", self.ordered),
    ]
    extra = super()._format_attrs()
    return attrs + extra
70
category.py
Python
pandas/core/indexes/category.py
a377f03b190d2802b0061669e8676450205bc479
pandas
1
262,880
42
11
11
100
11
0
49
102
get_package_paths
hookutils: support multiple package paths in collect_* helpers Split the functionality of ``get_package_paths`` into two new helpers, ``get_all_package_paths`` and ``package_base_path``. The former obtains all package paths, while the latter simplifies removal of package-specific sub-path from the full package-path. Implement the old, backwards-compatible ``get_package_paths`` using these helpers; the function now supports namespace packages, but always returns a single package path and its base path. Have ``collect_submodules``, ``collect_dynamic_libs``, and ``collect_data_files`` helpers use the new ``get_all_package_paths`` and extend them to process all returned package paths. This enables proper support for PEP420 namespace packages with multiple package paths.
https://github.com/pyinstaller/pyinstaller.git
def get_package_paths(package):
    pkg_paths = get_all_package_paths(package)
    if not pkg_paths:
        raise ValueError(f"Package '{package}' does not exist or is not a package!")

    if len(pkg_paths) > 1:
        logger.warning(
            "get_package_paths - package %s has multiple paths (%r); returning only first one!",
            package, pkg_paths
        )

    pkg_dir = pkg_paths[0]
    pkg_base = package_base_path(pkg_dir, package)

    return pkg_base, pkg_dir
58
__init__.py
Python
PyInstaller/utils/hooks/__init__.py
e232aaf089d150b085502b97ce0fcf699b45e1b2
pyinstaller
3
35,794
13
10
22
57
9
0
13
38
_resize
Maskformer (#15682) * maskformer * conflicts * conflicts * minor fixes * feature extractor test fix refactor MaskFormerLoss following conversation MaskFormer related types should not trigger a module time import error missed one removed all the types that are not used update config mapping minor updates in the doc resolved conversation that doesn't need a discussion minor changes resolved conversations fixed DetrDecoder * minor changes minor changes fixed mdx file test feature_extractor return types functional losses -> classes removed the return type test for the feature extractor minor changes + style + quality * conflicts? * rebase master * readme * added missing files * deleded poolformers test that where in the wrong palce * CI * minor changes * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * resolved conversations * minor changes * conversations [Unispeech] Fix slow tests (#15818) * remove soundfile old way of loading audio * Adapt slow test [Barthez Tokenizer] Fix saving (#15815) [TFXLNet] Correct tf xlnet generate (#15822) * [TFXLNet] Correct tf xlnet * adapt test comment Fix the push run (#15807) Fix semantic segmentation pipeline test (#15826) Fix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776) Add model specific output classes to PoolFormer model docs (#15746) * Added model specific output classes to poolformer docs * Fixed Segformer typo in Poolformer docs Adding the option to return_timestamps on pure CTC ASR models. (#15792) * Adding the option to return_timestamps on pure CTC ASR models. * Remove `math.prod` which was introduced in Python 3.8 * int are not floats. * Reworking the PR to support "char" vs "word" output. * Fixup! * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Update src/transformers/pipelines/automatic_speech_recognition.py Co-authored-by: Patrick von Platen <[email protected]> * Quality. 
Co-authored-by: Patrick von Platen <[email protected]> HFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824) Fix tf.concatenate + test past_key_values for TF models (#15774) * fix wrong method name tf.concatenate * add tests related to causal LM / decoder * make style and quality * clean-up * Fix TFBertModel's extended_attention_mask when past_key_values is provided * Fix tests * fix copies * More tf.int8 -> tf.int32 in TF test template * clean-up * Update TF test template * revert the previous commit + update the TF test template * Fix TF template extended_attention_mask when past_key_values is provided * Fix some styles manually * clean-up * Fix ValueError: too many values to unpack in the test * Fix more: too many values to unpack in the test * Add a comment for extended_attention_mask when there is past_key_values * Fix TFElectra extended_attention_mask when past_key_values is provided * Add tests to other TF models * Fix for TF Electra test: add prepare_config_and_inputs_for_decoder * Fix not passing training arg to lm_head in TFRobertaForCausalLM * Fix tests (with past) for TF Roberta * add testing for pask_key_values for TFElectra model Co-authored-by: ydshieh <[email protected]> [examples/summarization and translation] fix readme (#15833) Add ONNX Runtime quantization for text classification notebook (#15817) Re-enable doctests for the quicktour (#15828) * Re-enable doctests for the quicktour * Re-enable doctests for task_summary (#15830) * Remove & Framework split model report (#15825) Add TFConvNextModel (#15750) * feat: initial implementation of convnext in tensorflow. * fix: sample code for the classification model. * chore: added checked for from the classification model. * chore: set bias initializer in the classification head. * chore: updated license terms. * chore: removed ununsed imports * feat: enabled argument during using drop_path. * chore: replaced tf.identity with layers.Activation(linear). * chore: edited default checkpoint. * fix: minor bugs in the initializations. * partial-fix: tf model errors for loading pretrained pt weights. * partial-fix: call method updated * partial-fix: cross loading of weights (4x3 variables to be matched) * chore: removed unneeded comment. * removed playground.py * rebasing * rebasing and removing playground.py. * fix: renaming TFConvNextStage conv and layer norm layers * chore: added initializers and other minor additions. * chore: added initializers and other minor additions. * add: tests for convnext. * fix: integration tester class. * fix: issues mentioned in pr feedback (round 1). * fix: how output_hidden_states arg is propoagated inside the network. * feat: handling of arg for pure cnn models. * chore: added a note on equal contribution in model docs. * rebasing * rebasing and removing playground.py. * feat: encapsulation for the convnext trunk. * Fix variable naming; Test-related corrections; Run make fixup * chore: added Joao as a contributor to convnext. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * chore: corrected copyright year and added comment on NHWC. * chore: fixed the black version and ran formatting. * chore: ran make style. * chore: removed from_pt argument from test, ran make style. * rebasing * rebasing and removing playground.py. * rebasing * rebasing and removing playground.py. * fix: tests in the convnext subclass, ran make style. * rebasing * rebasing and removing playground.py. 
* rebasing * rebasing and removing playground.py. * chore: moved convnext test to the correct location * fix: locations for the test file of convnext. * fix: convnext tests. * chore: applied sgugger's suggestion for dealing w/ output_attentions. * chore: added comments. * chore: applied updated quality enviornment style. * chore: applied formatting with quality enviornment. * chore: revert to the previous tests/test_modeling_common.py. * chore: revert to the original test_modeling_common.py * chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py * fix: tests for convnext. * chore: removed output_attentions argument from convnext config. * chore: revert to the earlier tf utils. * fix: output shapes of the hidden states * chore: removed unnecessary comment * chore: reverting to the right test_modeling_tf_common.py. * Styling nits Co-authored-by: ariG23498 <[email protected]> Co-authored-by: Joao Gante <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> * minor changes * doc fix in feature extractor * doc * typose * removed detr logic from config * removed detr logic from config * removed num_labels * small fix in the config * auxilary -> auxiliary * make style * some test is failing * fix a weird char in config prevending doc-builder * retry to fix the doc-builder issue * make style * new try to fix the doc builder * CI * change weights to facebook Co-authored-by: NielsRogge <[email protected]> Co-authored-by: ariG23498 <[email protected]> Co-authored-by: Joao Gante <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
https://github.com/huggingface/transformers.git
def _resize(self, image, size, target=None, max_size=None):
    if not isinstance(image, Image.Image):
        image = self.to_pil_image(image)
219
feature_extraction_maskformer.py
Python
src/transformers/models/maskformer/feature_extraction_maskformer.py
d83d22f578276e9f201b0b3b0f8f9bd68e86c133
transformers
5
83,181
83
10
19
324
20
0
126
365
test_guest_user_subscribe
docs: Consistently hyphenate “web-public”. In English, compound adjectives should essentially always be hyphenated. This makes them easier to parse, especially for users who might not recognize that the words “web public” go together as a phrase. Signed-off-by: Anders Kaseorg <[email protected]>
https://github.com/zulip/zulip.git
def test_guest_user_subscribe(self) -> None:
    guest_user = self.example_user("polonius")
    result = self.common_subscribe_to_streams(guest_user, ["Denmark"], allow_fail=True)
    self.assert_json_error(result, "Not allowed for guest users")

    # Verify the internal checks also block guest users.
    stream = get_stream("Denmark", guest_user.realm)
    self.assertEqual(filter_stream_authorization(guest_user, [stream]), ([], [stream]))

    stream = self.make_stream("private_stream", invite_only=True)
    result = self.common_subscribe_to_streams(guest_user, ["private_stream"], allow_fail=True)
    self.assert_json_error(result, "Not allowed for guest users")
    self.assertEqual(filter_stream_authorization(guest_user, [stream]), ([], [stream]))

    web_public_stream = self.make_stream("web_public_stream", is_web_public=True)
    public_stream = self.make_stream("public_stream", invite_only=False)
    private_stream = self.make_stream("private_stream2", invite_only=True)
    # This test should be added as soon as the subscription endpoint allows
    # guest users to subscribe to web-public streams. Although they are already
    # authorized, the decorator in "add_subscriptions_backend" still needs to be
    # deleted.
    #
    # result = self.common_subscribe_to_streams(guest_user, ['web_public_stream'],
    #                                           is_web_public=True, allow_fail=True)
    # self.assert_json_success(result)

    streams_to_sub = [web_public_stream, public_stream, private_stream]
    self.assertEqual(
        filter_stream_authorization(guest_user, streams_to_sub),
        ([web_public_stream], [public_stream, private_stream]),
    )
199
test_subs.py
Python
zerver/tests/test_subs.py
90e202cd38d00945c81da4730d39e3f5c5b1e8b1
zulip
1
243,994
29
12
11
147
16
0
37
146
get_classes_from_csv
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
https://github.com/open-mmlab/mmdetection.git
def get_classes_from_csv(self, label_file):
    index_list = []
    classes_names = []
    with open(label_file, 'r') as f:
        reader = csv.reader(f)
        for line in reader:
            self.cat2label[line[0]] = line[1]
            classes_names.append(line[1])
            index_list.append(line[0])
    self.index_dict = {index: i for i, index in enumerate(index_list)}
    return classes_names
91
openimages.py
Python
mmdet/datasets/openimages.py
1516986a616fee8bb741d0ab2be40683045efccd
mmdetection
3
107,794
28
12
21
142
22
0
31
152
subprocess_run_helper
TST: re-arrange sub-process tests to be able to get coverage on them By putting the implementation in top-level functions and then importing the test module in the sub-process we are able to get accurate coverage on these tests. pytest-cov takes care of all of the coverage related magic implicitly. Also get coverage information out of isolated tk tests. Co-authored-by: Elliott Sales de Andrade <[email protected]>
https://github.com/matplotlib/matplotlib.git
def subprocess_run_helper(func, *args, timeout, **extra_env):
    target = func.__name__
    module = func.__module__
    proc = subprocess.run(
        [sys.executable,
         "-c",
         f,
         *args],
        env={
            **os.environ,
            "SOURCE_DATE_EPOCH": "0",
            **extra_env
        },
        timeout=timeout, check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True)
    return proc
86
__init__.py
Python
lib/matplotlib/testing/__init__.py
efc7f81cf0ee0f9f2875bd1dc5eabf48b06ae14e
matplotlib
1
245,726
65
13
22
449
27
0
131
297
encode
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
https://github.com/open-mmlab/mmdetection.git
def encode(self, bboxes, gt_bboxes, stride):
    bboxes = get_box_tensor(bboxes)
    gt_bboxes = get_box_tensor(gt_bboxes)
    assert bboxes.size(0) == gt_bboxes.size(0)
    assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
    x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
    y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
    w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
    h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
    x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
    y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
    w = bboxes[..., 2] - bboxes[..., 0]
    h = bboxes[..., 3] - bboxes[..., 1]
    w_target = torch.log((w_gt / w).clamp(min=self.eps))
    h_target = torch.log((h_gt / h).clamp(min=self.eps))
    x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
        self.eps, 1 - self.eps)
    y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
        self.eps, 1 - self.eps)
    encoded_bboxes = torch.stack(
        [x_center_target, y_center_target, w_target, h_target], dim=-1)
    return encoded_bboxes
321
yolo_bbox_coder.py
Python
mmdet/models/task_modules/coders/yolo_bbox_coder.py
d915740fa8228cf57741b27d9e5d66e358456b8e
mmdetection
1
176,973
25
11
6
91
8
0
33
55
out_degree_centrality
added examples to degree_alg.py (#5644) * added example on degree centrality * added example on in degree centrality * added example on out degree centrality * added opening braces
https://github.com/networkx/networkx.git
def out_degree_centrality(G):
    if len(G) <= 1:
        return {n: 1 for n in G}

    s = 1.0 / (len(G) - 1.0)
    centrality = {n: d * s for n, d in G.out_degree()}
    return centrality
61
degree_alg.py
Python
networkx/algorithms/centrality/degree_alg.py
b8d1438e4ea3d8190c650110b3b7d7c141224842
networkx
4
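A small usage sketch (not part of the dataset row): out-degree centrality is out_degree / (n - 1) for each node of a directed graph, exactly as computed above.

# Illustrative only; requires a standard NetworkX installation.
import networkx as nx

G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
print(nx.out_degree_centrality(G))  # {0: 1.0, 1: 0.5, 2: 0.0}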
276,100
46
17
17
166
14
0
65
217
tracing_scope
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def tracing_scope():
    # This enables the LayerCallCollection's tracing mechanism to trace all call
    # functions in the collection.
    previous_value = _thread_local_data.enable_call_tracing
    previous_queue = _thread_local_data.trace_queue
    try:
        _thread_local_data.enable_call_tracing = True
        _thread_local_data.trace_queue = []
        yield
    finally:
        # Run traces from the queue.
        while _thread_local_data.trace_queue:
            fn, args, kwargs, training = _thread_local_data.trace_queue.pop()
            if training is not None:
                with backend.deprecated_internal_learning_phase_scope(training):
                    fn.get_concrete_function(*args, **kwargs)
            else:
                fn.get_concrete_function(*args, **kwargs)
        _thread_local_data.trace_queue = previous_queue
        _thread_local_data.enable_call_tracing = previous_value
97
save_impl.py
Python
keras/saving/saved_model/save_impl.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
4
8,267
38
13
11
139
8
0
49
92
Deprecated
Add API Annotations to Ludwig (#2596) * Modified annotations.py from Ray for Ludwig * minor cleanup * address feedback * Add reference to Ray in LICENSE * remove args * add annotation message * address comments
https://github.com/ludwig-ai/ludwig.git
def Deprecated(*args, **kwargs):
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        return Deprecated()(args[0])

    message = "**DEPRECATED:** This API is deprecated and may be removed in a future Ludwig release."
    if "message" in kwargs:
        message += " " + kwargs["message"]
        del kwargs["message"]
    if kwargs:
        raise ValueError(f"Unknown kwargs: {kwargs.keys()}")
77
api_annotations.py
Python
ludwig/api_annotations.py
0c30938d0eeb2383a141012800652e5d59d4aa18
ludwig
6
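A generic sketch of the pattern the snippet above relies on (not Ludwig's actual implementation): a decorator that works both bare and with keyword arguments, distinguished by the len(args) == 1 and callable(args[0]) check.

# Illustrative, self-contained example of the dual-form decorator pattern.
import functools

def deprecated(*args, **kwargs):
    if len(args) == 1 and not kwargs and callable(args[0]):
        return deprecated()(args[0])          # used bare: @deprecated
    message = kwargs.get("message", "This API is deprecated.")

    def wrap(func):
        @functools.wraps(func)
        def inner(*a, **kw):
            print(f"WARNING: {func.__name__}: {message}")
            return func(*a, **kw)
        return inner
    return wrap

@deprecated(message="use new_api() instead")
def old_api():
    return 42

old_api()  # prints the warning, then returns 42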
262,617
18
12
4
65
10
0
19
59
on_epoch_start
Minors bug fixes on VITS/YourTTS and inference (#2054) * Set the right device to the speaker encoder * Bug fix on inference list_language_idxs parameter * Bug fix on speaker encoder resample audio transform
https://github.com/coqui-ai/TTS.git
def on_epoch_start(self, trainer):  # pylint: disable=W0613
    self._freeze_layers()

    # set the device of speaker encoder
    if self.args.use_speaker_encoder_as_loss:
        self.speaker_manager.encoder = self.speaker_manager.encoder.to(self.device)
38
vits.py
Python
TTS/tts/models/vits.py
f3b947e7066083f97f34ff1bc40911389fd52154
TTS
2
320,361
11
9
4
59
8
0
12
40
test_multi_part_language
Fixes language code checks around two part languages
https://github.com/paperless-ngx/paperless-ngx.git
def test_multi_part_language(self, m):
    m.return_value = ["chi_sim", "eng"]

    msgs = check_default_language_available(None)

    self.assertEqual(len(msgs), 0)
34
test_checks.py
Python
src/paperless_tesseract/tests/test_checks.py
55ef0d4a1b62c3abe8500cad97ddeecf9f746b84
paperless-ngx
1
167,736
6
7
5
25
4
0
6
20
kind
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
https://github.com/pandas-dev/pandas.git
def kind(self) -> str: return self.subtype.kind
14
dtype.py
Python
pandas/core/arrays/sparse/dtype.py
f65417656ba8c59438d832b6e2a431f78d40c21c
pandas
1
208,148
39
14
12
222
17
0
55
171
test_chord_clone_kwargs
BLM-2: Adding unit tests to chord clone (#7668) * Added .python-version and .vscode to .gitignore * Added test_chord_clone_kwargs() to verify chord cloning treats kwargs correctly * Happify linter
https://github.com/celery/celery.git
def test_chord_clone_kwargs(self, subtests):
    with subtests.test(msg='Verify chord cloning clones kwargs correctly'):
        c = chord([signature('g'), signature('h')], signature('i'), kwargs={'U': 6})
        c2 = c.clone()
        assert c2.kwargs == c.kwargs

    with subtests.test(msg='Cloning the chord with overridden kwargs'):
        override_kw = {'X': 2}
        c3 = c.clone(args=(1,), kwargs=override_kw)

    with subtests.test(msg='Verify the overridden kwargs were cloned correctly'):
        new_kw = c.kwargs.copy()
        new_kw.update(override_kw)
        assert c3.kwargs == new_kw
127
test_canvas.py
Python
t/unit/tasks/test_canvas.py
c3c6594b4cdea898abba218f576a669700dba98d
celery
1
315,391
6
6
3
22
4
0
6
20
source
Add instance attributes to GeolocationEvent (#74389)
https://github.com/home-assistant/core.git
def source(self) -> str: return self._attr_source
12
__init__.py
Python
homeassistant/components/geo_location/__init__.py
18840c8af59bfd12c262ca1c6bb68a4cb5f0445c
core
1
271,365
9
11
4
53
9
0
9
25
is_input_keras_tensor
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def is_input_keras_tensor(tensor):
    if not node_module.is_keras_tensor(tensor):
        raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(tensor))
    return tensor.node.is_input
31
functional_utils.py
Python
keras/engine/functional_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
250,281
15
11
7
72
12
0
16
62
test_delete_missing_version
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
https://github.com/matrix-org/synapse.git
def test_delete_missing_version(self) -> None:
    e = self.get_failure(
        self.handler.delete_version(self.local_user, "1"), SynapseError
    )
    res = e.value.code
    self.assertEqual(res, 404)
44
test_e2e_room_keys.py
Python
tests/handlers/test_e2e_room_keys.py
652d1669c5a103b1c20478770c4aaf18849c09a3
synapse
1
200,266
72
17
26
301
23
0
109
276
ldescent
replaced some broken reference links in doc with working ones
https://github.com/sympy/sympy.git
def ldescent(A, B):
    if abs(A) > abs(B):
        w, y, x = ldescent(B, A)
        return w, x, y
    if A == 1:
        return (1, 1, 0)
    if B == 1:
        return (1, 0, 1)
    if B == -1:  # and A == -1
        return

    r = sqrt_mod(A, B)
    Q = (r**2 - A) // B
    if Q == 0:
        B_0 = 1
        d = 0
    else:
        div = divisors(Q)
        B_0 = None
        for i in div:
            sQ, _exact = integer_nthroot(abs(Q) // i, 2)
            if _exact:
                B_0, d = sign(Q)*i, sQ
                break
    if B_0 is not None:
        W, X, Y = ldescent(A, B_0)
        return _remove_gcd((-A*X + r*W), (r*X - W), Y*(B_0*d))
190
diophantine.py
Python
sympy/solvers/diophantine/diophantine.py
af5e5abd15bb0e914c620a36c74a7555348cd37e
sympy
9
209,603
84
14
20
192
24
0
118
434
_send_get_slave_id
Add Automotive Logger for all debug outputs of the automotive layer
https://github.com/secdev/scapy.git
def _send_get_slave_id(self, identifier):
    # type: (int) -> List[XCPScannerResult]
    all_slaves = []
    body = TransportLayerCmd() / TransportLayerCmdGetSlaveId()
    xcp_req_and_res_list = \
        self._scan(
            identifier, body, 0xF2, TransportLayerCmdGetSlaveIdResponse)

    for req_and_res in xcp_req_and_res_list:
        response = req_and_res[1]

        # The protocol will also mark other XCP messages that might be
        # send as TransportLayerCmdGetSlaveIdResponse
        # -> Payload must be checked. It must include XCP
        if response.position_1 != 0x58 or response.position_2 != 0x43 or \
                response.position_3 != 0x50:
            continue

        # Identifier that the master must use to send packets to the slave
        # and the slave will answer with
        request_id = \
            response["TransportLayerCmdGetSlaveIdResponse"].can_identifier
        result = XCPScannerResult(request_id=request_id,
                                  response_id=response.identifier)
        all_slaves.append(result)
        log_automotive.info(
            "Detected XCP slave for broadcast identifier: " + str(
                identifier) + "\nResponse: " + str(result))

    return all_slaves
117
scanner.py
Python
scapy/contrib/automotive/xcp/scanner.py
495b21f2867e48286767085c8cf2918e4092e9dc
scapy
5
104,422
4
7
2
22
3
0
4
18
num_rows
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
https://github.com/huggingface/datasets.git
def num_rows(self): return self.table.num_rows
12
table.py
Python
src/datasets/table.py
e35be138148333078284b942ccc9ed7b1d826f97
datasets
1
244,031
28
11
7
146
13
0
37
86
binary_mask_dice_loss
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
https://github.com/open-mmlab/mmdetection.git
def binary_mask_dice_loss(self, mask_preds, gt_masks):
    mask_preds = mask_preds.flatten(1)
    gt_masks = gt_masks.flatten(1).float()
    numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)
    denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]
    loss = 1 - (numerator + self.eps) / (denominator + self.eps)
    return loss
92
match_cost.py
Python
mmdet/core/bbox/match_costs/match_cost.py
cac356380d505bf15587f07c0529218cc36b9652
mmdetection
1
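A brief numeric sketch (not part of the dataset row): the pairwise Dice cost above is 1 - 2*<p, g> / (|p| + |g|), computed for every (prediction, ground-truth) mask pair at once; the eps term is omitted here for clarity.

# Illustrative only; requires a standard PyTorch installation.
import torch

mask_preds = torch.tensor([[1.0, 1.0, 0.0, 0.0]])   # one flattened predicted mask
gt_masks = torch.tensor([[1.0, 0.0, 0.0, 0.0]])     # one flattened ground-truth mask
numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)
denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]
print(1 - numerator / denominator)   # tensor([[0.3333]]) -> Dice cost of 1/3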
108,043
22
8
59
37
8
0
23
32
_mark_every_path
FIX: allow float markevery with nans in line data in _mark_every_path() - TST: new test_markevery_linear_scales_nans() test + baseline images
https://github.com/matplotlib/matplotlib.git
def _mark_every_path(markevery, tpath, affine, ax):
    # pull out the two bits of data we want from the path
    codes, verts = tpath.codes, tpath.vertices
485
lines.py
Python
lib/matplotlib/lines.py
99d9475dae7679cc99457f2f804665cfff972639
matplotlib
13
22,857
76
13
18
173
19
0
104
266
hear
VoiceAssistant This is Voice Assistant coded using Python which can do the following: - 1. Speak Text entered by User. 2. Search anything on Google. 3. Search anything on Wikipedia. 4. Read an MS Word(docx) document. 5. Read a book(PDF). 6. Can be used as a Dictator.
https://github.com/geekcomputers/Python.git
def hear():
    r = sr.Recognizer()
    r.pause_threshold = 1  # a pause of more than 1 second will stop the microphone temporarily
    r.energy_threshold = 300  # python by default sets it to 300. It is the minimum input energy to be considered.
    r.dynamic_energy_threshold = True  # pyhton now can dynamically change the threshold energy

    with sr.Microphone() as source:  # read the audio data from the default microphone
        print(Fore.RED + "\nListening...")
        # time.sleep(0.5)
        speech = r.record(source, duration = 9)  # option
        # speech = r.listen(source)

        # convert speech to text
        try:
            # print("Recognizing...")
            recognizing()
            speech = r.recognize_google(speech)
            print(speech + "\n")
        except Exception as exception:
            print(exception)
            return "None"

        return speech
89
speakListen.py
Python
VoiceAssistant/Project_Basic_struct/speakListen.py
39c49e07066b2a53e176d555af6a7bf8aabb8a9c
Python
2
126,016
15
10
6
68
5
0
18
40
force_on_current_node
[AIR] Remove ML code from `ray.util` (#27005) Removes all ML related code from `ray.util` Removes: - `ray.util.xgboost` - `ray.util.lightgbm` - `ray.util.horovod` - `ray.util.ray_lightning` Moves `ray.util.ml_utils` to other locations Closes #23900 Signed-off-by: Amog Kamsetty <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def force_on_current_node(task_or_actor=None):
    node_resource_key = _get_current_node_resource_key()
    options = {"resources": {node_resource_key: 0.01}}

    if task_or_actor is None:
        return options

    return task_or_actor.options(**options)
41
node.py
Python
python/ray/tune/utils/node.py
862d10c162421706f77f73428429379a8b22fc38
ray
2
22,681
11
9
4
74
8
0
15
43
test_copy
refactor: clean code Signed-off-by: slowy07 <[email protected]>
https://github.com/geekcomputers/Python.git
def test_copy(self):
    x = Vector([1, 0, 0, 0, 0, 0])
    y = x.copy()
    self.assertEqual(x.__str__(), y.__str__())
47
tests.py
Python
linear-algebra-python/src/tests.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
1
88,240
80
13
31
221
15
0
122
478
_get_context
test: Add missing tests to sentry/relay/config/__init__.py [TET-504] (#41058) This PR increase code coverage from ~82% upto 98% in sentry/relay/config/__init__.py. codecov [report](https://app.codecov.io/gh/getsentry/sentry/pull/41058): <img width="1060" alt="image" src="https://user-images.githubusercontent.com/1374633/200516881-ed23da43-37df-4fc2-b291-310fc13f0ff5.png">
https://github.com/getsentry/sentry.git
def _get_context(self, key):
    if not key:
        return ({}, None, None)

    sdk_version = get_browser_sdk_version(key)

    # From JavaScript SDK version 7 onwards, the default bundle code is ES6, however, in the loader we
    # want to provide the ES5 version. This is why we need to modify the requested bundle name here.
    bundle_kind_modifier = ""
    if sdk_version >= Version("7.0.0"):
        bundle_kind_modifier = ".es5"

    js_sdk_loader_default_sdk_url_template_slot_count = (
        settings.JS_SDK_LOADER_DEFAULT_SDK_URL.count("%s")
    )

    try:
        if js_sdk_loader_default_sdk_url_template_slot_count == 2:
            sdk_url = settings.JS_SDK_LOADER_DEFAULT_SDK_URL % (
                sdk_version,
                bundle_kind_modifier,
            )
        elif js_sdk_loader_default_sdk_url_template_slot_count == 1:
            sdk_url = settings.JS_SDK_LOADER_DEFAULT_SDK_URL % (sdk_version,)
        else:
            sdk_url = settings.JS_SDK_LOADER_DEFAULT_SDK_URL
    except TypeError:
        sdk_url = ""  # It fails if it cannot inject the version in the string

    return (
        {
            "config": {"dsn": key.dsn_public},
            "jsSdkUrl": sdk_url,
            "publicKey": key.public_key,
        },
        sdk_version,
        sdk_url,
    )
130
js_sdk_loader.py
Python
src/sentry/web/frontend/js_sdk_loader.py
4821e6846b007cce0092f43141e4b436beb2bedc
sentry
6
259,079
44
9
8
110
7
0
60
90
test_normalized_mutual_info_score_bounded
FIX better handle limit cases in normalized_mutual_info_score (#22635)
https://github.com/scikit-learn/scikit-learn.git
def test_normalized_mutual_info_score_bounded(average_method):
    labels1 = [0] * 469
    labels2 = [1] + labels1[1:]
    labels3 = [0, 1] + labels1[2:]

    # labels1 is constant. The mutual info between labels1 and any other labelling is 0.
    nmi = normalized_mutual_info_score(labels1, labels2, average_method=average_method)
    assert nmi == 0

    # non constant, non perfect matching labels
    nmi = normalized_mutual_info_score(labels2, labels3, average_method=average_method)
    assert 0 <= nmi < 1
71
test_supervised.py
Python
sklearn/metrics/cluster/tests/test_supervised.py
020ee761c5c737e12a1e98897c7e4617271d0f66
scikit-learn
1
293,290
21
10
10
88
7
0
30
109
async_internal_added_to_hass
Prevent scene from restoring unavailable states (#67836)
https://github.com/home-assistant/core.git
async def async_internal_added_to_hass(self) -> None:
    await super().async_internal_added_to_hass()
    state = await self.async_get_last_state()
    if (
        state is not None
        and state.state is not None
        and state.state != STATE_UNAVAILABLE
    ):
        self.__last_activated = state.state
52
__init__.py
Python
homeassistant/components/scene/__init__.py
c9ac0b49f6e0c566f97a053da6a242455ac40671
core
4
181,904
5
6
36
25
5
0
5
8
generate_import_code
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def generate_import_code(pipeline, operators, impute=False, random_state=None):
222
export_utils.py
Python
tpot/export_utils.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
16
259,701
89
16
34
371
36
0
146
574
_minibatch_step
FEA Online implementation of non-negative matrix factorization (#16948) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _minibatch_step(self, X, W, H, update_H):
    batch_size = X.shape[0]

    # get scaled regularization terms. Done for each minibatch to take into account
    # variable sizes of minibatches.
    l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = self._scale_regularization(X)

    # update W
    if self.fresh_restarts or W is None:
        W = self._solve_W(X, H, self.fresh_restarts_max_iter)
    else:
        W, *_ = _multiplicative_update_w(
            X, W, H, self._beta_loss, l1_reg_W, l2_reg_W, self._gamma
        )

    # necessary for stability with beta_loss < 1
    if self._beta_loss < 1:
        W[W < np.finfo(np.float64).eps] = 0.0

    batch_cost = (
        _beta_divergence(X, W, H, self._beta_loss)
        + l1_reg_W * W.sum()
        + l1_reg_H * H.sum()
        + l2_reg_W * (W**2).sum()
        + l2_reg_H * (H**2).sum()
    ) / batch_size

    # update H (only at fit or fit_transform)
    if update_H:
        H[:] = _multiplicative_update_h(
            X,
            W,
            H,
            beta_loss=self._beta_loss,
            l1_reg_H=l1_reg_H,
            l2_reg_H=l2_reg_H,
            gamma=self._gamma,
            A=self._components_numerator,
            B=self._components_denominator,
            rho=self._rho,
        )

        # necessary for stability with beta_loss < 1
        if self._beta_loss <= 1:
            H[H < np.finfo(np.float64).eps] = 0.0

    return batch_cost
253
_nmf.py
Python
sklearn/decomposition/_nmf.py
69132ebbd39f070590ca01813340b5b12c0d02ab
scikit-learn
6
107,591
38
14
15
172
10
0
57
281
set_rlim
Simplify impl. of polar limits setting API. AFAICT we can just inherit set_ylim. Also slightly improve the docs of set_rlim.
https://github.com/matplotlib/matplotlib.git
def set_rlim(self, bottom=None, top=None, emit=True, auto=False, **kwargs):
    if 'rmin' in kwargs:
        if bottom is None:
            bottom = kwargs.pop('rmin')
        else:
            raise ValueError('Cannot supply both positional "bottom" '
                             'argument and kwarg "rmin"')
    if 'rmax' in kwargs:
        if top is None:
            top = kwargs.pop('rmax')
        else:
            raise ValueError('Cannot supply both positional "top" '
                             'argument and kwarg "rmax"')
    return self.set_ylim(bottom=bottom, top=top, emit=emit, auto=auto,
                         **kwargs)
101
polar.py
Python
lib/matplotlib/projections/polar.py
4dd64cfe842dae6647ec0289a23fb3074272b0b6
matplotlib
5
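For context on the set_rlim record above: the radial limits of a polar Axes are just its y-limits, so the method can delegate to set_ylim while still accepting the legacy rmin/rmax keywords. A short usage sketch, assuming Matplotlib's public polar-projection API (the data and limit values are arbitrary):

import matplotlib.pyplot as plt

# Plot on a polar subplot and cap the radial axis at 2.0.
ax = plt.subplot(projection="polar")
ax.plot([0.0, 1.0, 2.0], [0.5, 1.5, 2.5])
ax.set_rlim(bottom=0, top=2.0)   # equivalent to ax.set_ylim(0, 2.0)
ax.set_rlim(rmax=2.0)            # the legacy keyword form is still accepted
plt.show()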
189,212
68
16
52
402
19
0
210
1,052
call
Delete extra whitespace. A correction that does not affect the operation.
https://github.com/aws/aws-cli.git
def call(self, src_files, dest_files):
    # :var src_done: True if there are no more files from the source left.
    src_done = False
    # :var dest_done: True if there are no more files from the dest left.
    dest_done = False
    # :var src_take: Take the next source file from the generated files if
    # true
    src_take = True
    # :var dest_take: Take the next dest file from the generated files if
    # true
    dest_take = True
    while True:
        try:
            if (not src_done) and src_take:
                src_file = advance_iterator(src_files)
        except StopIteration:
            src_file = None
            src_done = True
        try:
            if (not dest_done) and dest_take:
                dest_file = advance_iterator(dest_files)
        except StopIteration:
            dest_file = None
            dest_done = True

        if (not src_done) and (not dest_done):
            src_take = True
            dest_take = True
            compare_keys = self.compare_comp_key(src_file, dest_file)

            if compare_keys == 'equal':
                should_sync = self._sync_strategy.determine_should_sync(
                    src_file, dest_file
                )
                if should_sync:
                    yield src_file
            elif compare_keys == 'less_than':
                src_take = True
                dest_take = False
                should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
                if should_sync:
                    yield src_file
            elif compare_keys == 'greater_than':
                src_take = False
                dest_take = True
                should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
                if should_sync:
                    yield dest_file

        elif (not src_done) and dest_done:
            src_take = True
            should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
            if should_sync:
                yield src_file

        elif src_done and (not dest_done):
            dest_take = True
            should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
            if should_sync:
                yield dest_file
        else:
            break
239
comparator.py
Python
awscli/customizations/s3/comparator.py
8a16d7d8ce5e3f97fb100af7a960224f7f80137d
aws-cli
22
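The Comparator.call generator above is at heart a two-pointer merge over two key-sorted streams, dispatching to different sync strategies depending on whether a file exists only at the source, only at the destination, or at both. A stripped-down, generic sketch of that merge pattern (not the aws-cli implementation; the function name, states, and key function are illustrative):

def merge_compare(src, dest, key=lambda item: item):
    # Walk two key-sorted iterables in lockstep and classify each item.
    src_iter, dest_iter = iter(src), iter(dest)
    s = next(src_iter, None)
    d = next(dest_iter, None)
    while s is not None or d is not None:
        if d is None or (s is not None and key(s) < key(d)):
            yield ("only_at_src", s)      # missing at the destination
            s = next(src_iter, None)
        elif s is None or key(d) < key(s):
            yield ("only_at_dest", d)     # missing at the source
            d = next(dest_iter, None)
        else:
            yield ("at_both", s)          # present on both sides
            s = next(src_iter, None)
            d = next(dest_iter, None)


# Example: 'b.txt' exists on both sides, the other files on one side only.
for state, item in merge_compare(["a.txt", "b.txt"], ["b.txt", "c.txt"]):
    print(state, item)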
8,638
37
12
8
140
16
0
55
132
test_user_window_size
Enable dataset window autosizing (#2721) * set windowed shuffle for large datasets * documentation * update to automatic windowing flag * address reviews * address reviews * update logging info and add auto_window flag passthrough * update tests to use flag passthrough * more descriptive test class name * todo to add link to windowing docs * local test handling for dask import * handle RayDataset import in local tests * bad type annotation * bad type annotation
https://github.com/ludwig-ai/ludwig.git
def test_user_window_size(self, ray_cluster_small_object_store):
    # This pipeline should use the heuristic window size.
    ds = self.create_dataset(self.object_store_size * 8)
    pipe = ds.pipeline()
    rep = next(iter(pipe._base_iterable))()
    auto_num_blocks = rep.num_blocks()

    # This pipeline should have fewer windows but more blocks per window
    # than the autosized pipeline.
    pipe = ds.pipeline(window_size_bytes=self.auto_window_size * 2)
    rep = next(iter(pipe._base_iterable))()
    assert auto_num_blocks < rep.num_blocks()
82
test_ray.py
Python
tests/integration_tests/test_ray.py
0d19a48cff0958ed77926a0712cbdb6485d4034a
ludwig
1
288,716
6
7
3
22
3
0
6
12
_generate_client_device_id
Use persistent device id for jellyfin requests (#79840)
https://github.com/home-assistant/core.git
def _generate_client_device_id() -> str:
    return random_uuid_hex()
11
config_flow.py
Python
homeassistant/components/jellyfin/config_flow.py
5b0a37a44752edbbf785d6a200e3b7a3f5fa2047
core
1
176,268
22
11
7
77
8
0
27
46
find_cliques_recursive
Fix functions appearing in variables `__all__` but not in docs for NX2.7 (#5289) * Adjust functions appearing in `__all__` but not in docs * clean up coloring: merge two modules make interchange private * fix duplicate name. Probably should be changed * fix "see also" doc of recursive_simple_cycles * Rm internal uses of deprecated . * Fixup warnings filters regex. * clean up a bit more, make Node & AdjList private classes Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
https://github.com/networkx/networkx.git
def find_cliques_recursive(G):
    if len(G) == 0:
        return iter([])

    adj = {u: {v for v in G[u] if v != u} for u in G}
    Q = []
63
clique.py
Python
networkx/algorithms/clique.py
17fa9942568bfca34d4a68f8d93c538014f69389
networkx
5
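The find_cliques_recursive record above only shows the function's set-up (the self-loop-free adjacency dict and the stack Q); the recursive clique expansion follows in the original source. A small usage sketch of the public NetworkX function, assuming the standard networkx package:

import networkx as nx

# Two triangles sharing the edge (1, 2): the maximal cliques are {0, 1, 2} and {1, 2, 3}.
G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)])
cliques = sorted(sorted(c) for c in nx.find_cliques_recursive(G))
print(cliques)  # [[0, 1, 2], [1, 2, 3]]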
210,293
17
10
6
82
12
0
21
63
predict_skeleton_with_mot
Pipeline with kpt and act (#5399) * add keypoint infer and visualize into Pipeline * add independent action model inference * add action inference into pipeline, still in working * test different display frames and normalization methods * use bbox and scale normalization * Remove debug info and Optimize code structure * remove useless visual param * make action parameters configurable
https://github.com/PaddlePaddle/PaddleDetection.git
def predict_skeleton_with_mot(self, skeleton_with_mot, run_benchmark=False):
    skeleton_list = skeleton_with_mot["skeleton"]
    mot_id = skeleton_with_mot["mot_id"]
    act_res = self.predict_skeleton(skeleton_list, run_benchmark, repeats=1)
    results = list(zip(mot_id, act_res))
    return results
51
action_infer.py
Python
deploy/python/action_infer.py
7018dad10757b6d414f1b00a547244bced596d68
PaddleDetection
1
322,141
4
9
2
28
4
0
4
10
istree
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def istree(sequence):
    return DepTree(sequence).judge_legal()
15
utils.py
Python
examples/dependency_parsing/ddparser/utils.py
621357338437ee420eabbbf5ab19065bc85e73a5
PaddleNLP
1
273,618
6
6
2
20
5
0
6
20
call
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def call(self, inputs, states):
    raise NotImplementedError
12
abstract_rnn_cell.py
Python
keras/layers/rnn/abstract_rnn_cell.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
177,252
53
14
20
223
23
0
73
220
intersection_all
Make all.py generator friendly (#5984) * Make compose_all generator friendly * Make disjoint_union_all and intersection_all generator friendly * Refactor disjoint_union_all to yield relabeled graphs * Make union_all generator friendly * Fix intersection_all * Fix union_all signature * Allow passing an infinite rename generator to union_all * Copy over generalizations to binary.py * Clean up rename * Simplify first_label in disjoint_union_all * Simplify disjoint_union_all * Add missing R.graph.update in intersection_all
https://github.com/networkx/networkx.git
def intersection_all(graphs):
    R = None

    for i, G in enumerate(graphs):
        G_nodes_set = set(G.nodes)
        G_edges_set = set(G.edges(keys=True) if G.is_multigraph() else G.edges())
        if i == 0:
            # create new graph
            R = G.__class__()
            node_intersection = G_nodes_set
            edge_intersection = G_edges_set
        elif G.is_multigraph() != R.is_multigraph():
            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
        else:
            node_intersection &= G_nodes_set
            edge_intersection &= G_edges_set

        R.graph.update(G.graph)

    if R is None:
        raise ValueError("cannot apply intersection_all to an empty list")

    R.add_nodes_from(node_intersection)
    R.add_edges_from(edge_intersection)

    return R
132
all.py
Python
networkx/algorithms/operators/all.py
50ff08de69c6e9541cd6c029bede5dabf56cfe73
networkx
6
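To complement the intersection_all record above: the function keeps only the nodes and edges present in every input graph, and in this generator-friendly version any iterable of graphs is accepted. A minimal sketch, assuming NetworkX's public API:

import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (3, 4)])
H = nx.Graph([(2, 3), (3, 4), (4, 5)])

# Only the nodes and edges common to every input survive; a generator works too.
R = nx.intersection_all(g for g in (G, H))
print(sorted(R.nodes()))  # [2, 3, 4]
print(sorted(R.edges()))  # [(2, 3), (3, 4)]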
60,403
12
11
6
61
7
0
12
22
PrintUsage
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def PrintUsage(message):
    sys.stderr.write(_USAGE)
    if message:
        sys.exit('\nFATAL ERROR: ' + message)
    else:
        sys.exit(1)
33
cpp_lint.py
Python
code/deep/BJMMD/caffe/scripts/cpp_lint.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
2
249,109
91
11
61
464
38
0
159
762
test_delete_media
Use literals in place of `HTTPStatus` constants in tests (#13469)
https://github.com/matrix-org/synapse.git
def test_delete_media(self) -> None:
    download_resource = self.media_repo.children[b"download"]
    upload_resource = self.media_repo.children[b"upload"]

    # Upload some media into the room
    response = self.helper.upload_media(
        upload_resource,
        SMALL_PNG,
        tok=self.admin_user_tok,
        expect_code=200,
    )
    # Extract media ID from the response
    server_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
    server_name, media_id = server_and_media_id.split("/")

    self.assertEqual(server_name, self.server_name)

    # Attempt to access media
    channel = make_request(
        self.reactor,
        FakeSite(download_resource, self.reactor),
        "GET",
        server_and_media_id,
        shorthand=False,
        access_token=self.admin_user_tok,
    )

    # Should be successful
    self.assertEqual(
        200,
        channel.code,
        msg=(
            "Expected to receive a 200 on accessing media: %s" % server_and_media_id
        ),
    )

    # Test if the file exists
    local_path = self.filepaths.local_media_filepath(media_id)
    self.assertTrue(os.path.exists(local_path))

    url = "/_synapse/admin/v1/media/%s/%s" % (self.server_name, media_id)

    # Delete media
    channel = self.make_request(
        "DELETE",
        url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(1, channel.json_body["total"])
    self.assertEqual(
        media_id,
        channel.json_body["deleted_media"][0],
    )

    # Attempt to access media
    channel = make_request(
        self.reactor,
        FakeSite(download_resource, self.reactor),
        "GET",
        server_and_media_id,
        shorthand=False,
        access_token=self.admin_user_tok,
    )
    self.assertEqual(
        HTTPStatus.NOT_FOUND,
        channel.code,
        msg=(
            "Expected to receive a HTTPStatus.NOT_FOUND on accessing deleted media: %s"
            % server_and_media_id
        ),
    )

    # Test if the file is deleted
    self.assertFalse(os.path.exists(local_path))
297
test_media.py
Python
tests/rest/admin/test_media.py
c97042f7eef3748e17c90e48a4122389a89c4735
synapse
1
323,136
8
8
6
28
5
0
8
22
is_world_process_zero
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tasks. * add trainer prototype. * init version for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answering tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def is_world_process_zero(self) -> bool:
    return self.args.process_index == 0
16
trainer_base.py
Python
paddlenlp/trainer/trainer_base.py
44a290e94d1becd1f09fddc3d873f9e19c9d6919
PaddleNLP
1
64,314
30
14
25
187
11
0
42
19
show_job_status
feat: Bulk Transaction Processing (#28580) * feat: Bulk Transaction Processing * fix: add flags to ignore validations and exception handling correction * fix: remove duplicate code, added logger functionality and improved notifications * fix: linting and sider issues * test: added tests * fix: linter issues * fix: failing test case * fix: sider issues and test cases * refactor: mapping function calls to create order/invoice * fix: added more test cases to increase coverage * fix: test cases * fix: sider issue * fix: rename doctype, improve formatting and minor refactor * fix: update doctype name in hooks and sider issues * fix: entry log test case * fix: typos, translations and company name in tests * fix: linter issues and translations * fix: linter issue * fix: split into separate function for marking failed transaction * fix: typos, retry failed transaction logic and make log read only * fix: hide retry button when no failed transactions and remove test cases not relevant * fix: sider issues and indentation to tabs Co-authored-by: Ankush Menat <[email protected]>
https://github.com/frappe/erpnext.git
def show_job_status(failed_history, deserialized_data, to_doctype):
    if not failed_history:
        frappe.msgprint(
            _("Creation of {0} successful").format(to_doctype),
            title="Successful",
            indicator="green",
        )

    if len(failed_history) != 0 and len(failed_history) < len(deserialized_data):
        frappe.msgprint(
            _().format(to_doctype),
            title="Partially successful",
            indicator="orange",
        )

    if len(failed_history) == len(deserialized_data):
        frappe.msgprint(
            _().format(to_doctype),
            title="Failed",
            indicator="red",
        )
111
bulk_transaction.py
Python
erpnext/utilities/bulk_transaction.py
a3e69cf75d27198132d05c7c10475a0297b1e190
erpnext
5
43,469
9
10
4
56
11
0
9
41
test_create_queue_exception
Implement Azure Service Bus Queue Operators (#24038) Implemented Azure Service Bus Queue based Operator's to create queue, send message to the queue and receive message(list of message or batch message) and delete queue in azure service - Added `AzureServiceBusCreateQueueOperator` - Added `AzureServiceBusSendMessageOperator` - Added `AzureServiceBusReceiveMessageOperator` - Added `AzureServiceBusDeleteQueueOperator` - Added Example DAG - Added Documentation - Added hooks and connection type in - provider yaml file - Added unit Test case, doc strings
https://github.com/apache/airflow.git
def test_create_queue_exception(self, mock_sb_admin_client):
    hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)

    with pytest.raises(TypeError):
        hook.create_queue(None)
32
test_asb.py
Python
tests/providers/microsoft/azure/hooks/test_asb.py
09f38ad3f6872bae5059a1de226362eb358c4a7a
airflow
1