content
stringlengths
7
2.61M
/**
 * Overwrite of the API method for stopping the Topic connection.
 *
 * Stops every open session attached to this connection, then marks the
 * connection as stopped. Calling this on an already-stopped connection is a
 * no-op (beyond the open/modified checks).
 *
 * @throws IllegalStateException If the connection is closed or broken.
 * @see Connection#stop()
 */
public void stop() throws JMSException {
    synchronized (this) {
        ensureOpen();
        setModified();
        if (stopped) {
            return; // already stopped — nothing to do
        }
        if (_logger.isDebugEnabled()) {
            _logger.debug("GSConnectionImpl.stop() stopping connection: " + toString());
        }
        // Stop each session held by this connection.
        for (int idx = 0; idx < sessions.size(); idx++) {
            GSSessionImpl openSession = (GSSessionImpl) sessions.get(idx);
            openSession.stop();
        }
        if (_logger.isDebugEnabled()) {
            _logger.debug("GSConnectionImpl.stop() connection was stopped: " + getCnxKey());
        }
        stopped = true;
    }
}
Despite the ruling a 'stay' will keep the ban in effect and Gov. Rick Perry says Texans voted otherwise. (Reuters) A Texas judge has struck down that state's ban on gay marriage. U.S. District Judge Orlando Garcia did not say gay marriages could be performed immediately. Instead, he stayed the decision, citing a likely appeal. "Without a rational relation to a legitimate governmental purpose, state-imposed inequality can find no refuge in our United States Constitution," Garcia wrote in his decision. "These Texas laws deny Plaintiffs access to the institution of marriage and its numerous rights, privileges, and responsibilities for the sole reason that Plaintiffs wish to be married to a person of the same sex." The state's gay marriage ban was challenged by two gay couples -- one seeking to marry in Texas and one seeking to have their marriage, which was performed in Massachusetts, to be recognized. The case appears to be headed for the U.S. Supreme Court. Texas Attorney General Greg Abbott (R), who is also seeking to become the state's next governor, is expected to appeal. Texas Democrats hailed the decision. "This is a historic day for the LGBT community and the state of Texas," state Democratic Party Chairman Gilberto Hinojosa said. "As Dr. King once stated, 'The arc of the moral universe is long, but it bends towards justice.' Today, all Texans can celebrate that we are one step closer to justice and equality for all." Texas Gov. Rick Perry (R) said his state will indeed seek to uphold the ban. "Texans spoke loud and clear by overwhelmingly voting to define marriage as a union between a man and a woman in our Constitution, and it is not the role of the federal government to overturn the will of our citizens," Perry said, adding: "We will continue to fight for the rights of Texans to self-determine the laws of our state." Texas is the latest in a quick succession of states in which a judge has struck down a ban on gay marriage. 
Judges in Virginia, New Jersey, Oklahoma, Utah and California have also struck down those states' bans. A judge in New Mexico also recently legalized gay marriage, though that state didn't have a ban in place. Judges in Ohio and Kentucky have instructed their states to recognize gay marriages performed elsewhere. Seventeen states, not including Texas, currently allow gay marriage. Texas voters remain split on gay marriage, with a recent poll showing 48 percent favor it and 49 percent oppose it. Below is the full ruling: Federal judge's ruling on Texas gay marriage ban Updated at 3:53 p.m.
This invention relates to a fuel injection control apparatus for an internal combustion engine, and in particular to a fuel control apparatus for processing the measured values of the inlet air flow rate of an internal combustion engine for an automobile. Heretofore, there has been proposed such a fuel control apparatus for an internal combustion engine as shown in FIG. 1. In the figure, an internal combustion engine 1 is supplied with fuel by an electromagnetically driven injector 2. A hot-wire type air flow sensor (hereinafter abbreviated as AFS) 3 for sensing the flow rate of an inlet air inhaled into the engine 1 and a throttle valve 5 for adjusting the flow rate of the inlet air into the engine 1 are mounted on the inlet pipe 6 as shown in FIG. 1. A water (coolant) temperature sensor 7 is also disposed near the engine 1 to indicate the temperature of the engine 1. An ignition control unit 8 computes a fuel amount to be supplied to the engine 1 from an air flow rate signal obtained by the AFS 3 and applies to the injector 2 pulses whose pulse widths correspond to a required fuel amount. The ignition control unit 8 is connected to a well known ignition device 9 which generates an ignition pulse signal each time the engine 1 is at a predetermined rotational angle. Also disposed in this fuel control apparatus are a fuel tank 11, a fuel pump 12 for pressurizing the fuel, and a fuel regulator 13 for maintaining a constant pressure on the fuel supplied to the injector 2, as is well known in the art. 
The ignition control unit 8 includes an input interface circuit 80, a micro-processor 81 for processing various input signals from the input interface circuit 80, computing a fuel amount to be supplied to the inlet pipe 6 of the engine 1 in accordance with a program previously stored in a ROM 82, and for controlling the driving signal of the injector 2, a RAM 83 for temporarily storing data during the process of the computation of the micro-processor 81, and an output interface circuit 84 for driving the injector 2. In the operation of the fuel injection control apparatus for an engine shown in FIG. 1, in the well known manner, the control unit 8 receives as an input an inlet air flow rate of the engine 1 detected by the AFS 3, calculates a fuel amount to be supplied to the engine 1 on the basis of the detected flow rate, detects the rotational speed of the engine 1 from the ignition pulse frequency provided by the ignition device 9, calculates a fuel amount per one engine revolution, and applies pulses with a required pulse width to the injector 2 in synchronization with the ignition pulses. It is to be noted that since the air/fuel (hereinafter abbreviated as A/F) ratio required for the engine 1 needs to be preset at the rich side when the temperature of the engine 1 is low, the pulse width of the pulses applied to the injector 2 may be incrementally corrected in accordance with thermal signals obtained from the coolant temperature sensor 7. Since the AFS 3 used for this fuel control apparatus can detect the inlet air flow rate by the weight thereof, it has an excellent feature that there is no need to additionally provide a correction means for changes in the atmospheric pressure. 
However, the AFS 3 is quite sensitive to an air blow-back phenomenon caused by the overlapped operation of the inlet and exhaust valves of the engine whereby the AFS 3 detects an inlet air flow rate signal including the blow-back flow rate so that it erroneously develops an output signal indicative of a flow rate larger than the actual inlet air flow rate. The aforementioned blow-back phenomenon may easily arise during low speeds of the engine and in a condition where the throttle valve of the engine is fully opened, where the true inlet air flow rate assumes such a waveform as if the inlet air flow rate has increased as shown in FIG. 2, despite the fact that no inlet air is inhaled during a time interval Tr. As a result, as shown in FIG. 3, the output of the AFS 3 exhibits a value considerably higher than the true value (shown by dotted lines) during a low speed zone (or region) and in the fully opened condition of the throttle valve. Dependent on the layout of the engine or the inlet air system, an error due to the blow-back phenomenon may attain as much as a 50% increase of the true value so that such an AFS can not be made practical without any modification thereof. In order to compensate for such an error, there has been proposed a system in which the output signal "a" shown by the arcuate portion of a solid curve in FIG. 4 provided by the AFS 3 is neglected. Instead a clipping value "c" (average value), shown by a dotted line in FIG. 4, which is somewhat larger (by e.g. 10%) than a value "b" (actual value) of the true inlet air flow rate of the engine 1 is determined by reading from the ROM 82 the maximum inlet air flow rate (including some variation) corresponding to speed of the engine 1, which was previously stored in the ROM 82. This operation based on the concept of FIG. 4 is illustrated in the flow chart shown in FIG. 5. 
Namely, at first, an inlet air flow rate (Qa) is read in by the AFS 3 and an engine speed (Ne) is read in by the ignition device 9 (step T1 and T2). It is then checked in step T3 whether or not Qa&gt;c(Ne), i.e. whether or not Qa is larger than the clipping value c(Ne) which is a function of the engine speed Ne. If the answer is "yes", then the clipping operation is made in step T4 so that the inlet air flow rate is clipped to c(Ne). If the answer is "no", then no clipping operation is made as illustrated in step T5 so that the inlet air flow rate Qa is directly used. Then, the pulse width of the pulse to be applied to the injector 2 is calculated in step T6 according to the well known equation: To=KxQ/Ne where K is a predetermined constant. However, according to this system, the clipping value "c" shown in FIG. 4 for the inlet air flow rate is preset at maximum inlet air flow rate for the engine 1 being at sea level, and therefore, an A/F ratio for a low atmospheric pressure when a car is being driven at a higher altitude should be largely shifted towards the rich side, resulting in a possibility of not only wasting fuel but also inducing a misfire. On the other hand, another correction system of subtracting a blow-back waveform from the inlet air waveform has also been proposed. However, the blow-back waveform gradually varies relative to the opening of the throttle valve and the engine speed so that the discrimination between the blow-back waveform and the inlet air waveform can not be precisely made. One example of this system is disclosed in Japanese Patent Application Laid-open No. 56-108909 published Aug. 28, 1981. This publication describes an air flow rate detector in which a hot-wire type AFS is used to detect the inlet air flow rate by correcting an error due to the blow-back air flow rate. 
In such a fuel injection control apparatus for an internal combustion engine thus arranged, a disadvantage is that the hot-wire type AFS used therein erroneously detects the inlet air flow rate to be higher than the true value due to the air blow-back phenomenon arising during low engine speed and in the fully opened condition of the throttle valve due to the overlapped operation of the valves of the engine so that an operating zone exists where the A/F ratio can not be properly controlled.
<filename>src/main/java/com/course/spring/exceptions/DataBaseException.java package com.course.spring.exceptions; public class DataBaseException extends RuntimeException { public DataBaseException(String message){ super(message); } }
// <reponame>Richard-DEPIERRE/kuzzle
/*
 * Kuzzle, a backend software, self-hostable and ready to use
 * to power modern apps
 *
 * Copyright 2015-2020 Kuzzle
 * mailto: support AT kuzzle.io
 * website: http://kuzzle.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import Bluebird from 'bluebird';
import { JSONObject } from 'kuzzle-sdk';
import { Koncorde, NormalizedFilter } from 'koncorde';

import { KuzzleRequest, Request, RequestContext } from '../../api/request';
import kerror from '../../kerror';
import createDebug from '../../util/debug';
import {
  fromKoncordeIndex,
  getCollections,
  toKoncordeIndex,
} from '../../util/koncordeCompat';
import { User, RoomList } from '../../types';

import { Channel } from './channel';
import { ConnectionRooms } from './connectionRooms';
import { Room } from './room';
import { Subscription } from './subscription';

const realtimeError = kerror.wrap('core', 'realtime');

const debug = createDebug('kuzzle:realtime:hotelClerk');

/**
 * The HotelClerk is responsible of keeping the list of rooms and subscriptions
 * made to those rooms.
 *
 * When a subscription is made to a room, the HotelClerk link the connection
 * to a channel of this room. Each channel represents a specific configuration
 * about which kind of notification the subscriber should receive (e.g. scope in/out)
 *
 * When an user is subscribing, we send him back the channel he is subscribing to.
 *
 * Here stop the role of the HotelClerk, then the notifier will select the channels
 * according to the notification and notify them.
 */
export class HotelClerk {
  // Realtime module handle; only its `notifier` is used here.
  private module: any;

  /**
   * Number of created rooms.
   *
   * Used with the "subscriptionRooms" configuration limit.
   */
  private roomsCount = 0;

  /**
   * Current realtime rooms.
   *
   * This object is used by the notifier to list wich channel has to be notified
   * when a subscription scope is matching.
   * It's also used to notify channels when an user join/exit a room.
   *
   * Map<roomId, Room>
   */
  private rooms = new Map<string, Room>();

  /**
   * Current subscribing connections handled by the HotelClerk.
   *
   * Each connection can subscribe to many rooms with different volatile data.
   *
   * This object is used to keep track of all subscriptions made by a connection
   * to be able to unsubscribe when a connection is removed.
   *
   * Map<connectionId, ConnectionRooms>
   */
  private subscriptions = new Map<string, ConnectionRooms>();

  /**
   * Shortcut to the Koncorde instance on the global object.
   */
  private koncorde: Koncorde;

  // NOTE(review): assumes global.kuzzle (and its koncorde instance) is fully
  // initialized before this constructor runs — TODO confirm boot order.
  constructor (realtimeModule: any) {
    this.module = realtimeModule;

    this.koncorde = global.kuzzle.koncorde;
  }

  /**
   * Registers the ask events.
   */
  async init (): Promise<void> {
    /**
     * Create a new, empty room.
     * @param {string} index
     * @param {string} collection
     * @param {string} roomId
     * @returns {boolean} status indicating if the room was created or not
     */
    global.kuzzle.onAsk(
      'core:realtime:room:create',
      (index, collection, roomId) => this.newRoom(index, collection, roomId));

    /**
     * Joins an existing room.
     * @param {Request} request
     * @returns {Promise}
     */
    global.kuzzle.onAsk(
      'core:realtime:join',
      request => this.join(request));

    /**
     * Return the list of index, collection, rooms (+ their users count)
     * on all index/collection pairs that the requesting user is allowed to
     * subscribe
     *
     * @param {User} user
     * @return {number}
     * @throws {NotFoundError} If the roomId does not exist
     */
    global.kuzzle.onAsk('core:realtime:list', user => this.list(user));

    /**
     * Given an index, returns the list of collections having subscriptions
     * on them.
     * @param {string} index
     * @return {Array.<string>}
     */
    global.kuzzle.onAsk(
      'core:realtime:collections:get',
      index => this.listCollections(index));

    /**
     * Removes a user and all their subscriptions.
     * @param {string} connectionId
     */
    global.kuzzle.onAsk(
      'core:realtime:connection:remove',
      connectionId => this.removeConnection(connectionId));

    /**
     * Adds a new user subscription
     * @param {Request} request
     * @return {Object|null}
     */
    global.kuzzle.onAsk(
      'core:realtime:subscribe',
      request => this.subscribe(request));

    /**
     * Unsubscribes a user from a room
     * @param {string} connectionId
     * @param {string} roomId
     * @param {string} kuid
     * @param {boolean} [notify]
     */
    global.kuzzle.onAsk(
      'core:realtime:unsubscribe',
      (connectionId, roomId, notify) => {
        return this.unsubscribe(connectionId, roomId, notify);
      });

    /**
     * Returns inner metrics from the HotelClerk
     * @return {{rooms: number, subscriptions: number}}
     */
    global.kuzzle.onAsk(
      'core:realtime:hotelClerk:metrics',
      () => this.metrics());

    /**
     * Clear the hotel clerk and properly disconnect connections.
     */
    global.kuzzle.on('kuzzle:shutdown', () => this.clearConnections());

    /**
     * Clear subscriptions when a connection is dropped
     */
    global.kuzzle.on('connection:remove', connection => {
      this.removeConnection(connection.id)
        .catch(err => global.kuzzle.log.info(err));
    });
  }

  /**
   * Subscribe a connection to a realtime room.
   *
   * The room will be created if it does not already exists.
   *
   * Notify other subscribers on this room about this new subscription
   *
   * Returns null (not an error) when the connection died between the funnel
   * check and this point — see the inline comment below.
   *
   * @throws Throws if the user has already subscribed to this room name
   *         (just for rooms with same name, there is no error if the room
   *         has a different name with same filter) or if there is an error
   *         during room creation
   */
  async subscribe (request: KuzzleRequest): Promise<{ channel: string, roomId: string }> {
    const { index, collection } = request.input.resource;

    if (! index) {
      return kerror.reject('api', 'assert', 'missing_argument', 'index');
    }

    if (! collection) {
      return kerror.reject('api', 'assert', 'missing_argument', 'collection');
    }

    /*
     * /!\ This check is a duplicate to the one already made by the
     * funnel controller. THIS IS INTENTIONAL.
     *
     * This is to prevent subscriptions to be made on dead
     * connections. And between the funnel and here, there is
     * time for a connection to drop, so while the check
     * on the funnel is useful for many use cases, this one
     * is made on the very last moment and is essential to ensure
     * that no zombie subscription can be performed
     */
    if (! global.kuzzle.router.isConnectionAlive(request.context)) {
      return null;
    }

    let normalized: NormalizedFilter;

    try {
      normalized = this.koncorde.normalize(
        request.input.body,
        toKoncordeIndex(index, collection));
    }
    catch (e) {
      throw kerror.get('api', 'assert', 'koncorde_dsl_error', e.message);
    }

    this.createRoom(normalized);

    const { channel, subscribed } = await this.subscribeToRoom(normalized.id, request);

    if (subscribed) {
      global.kuzzle.emit('core:realtime:subscribe:after', normalized.id);

      // @deprecated -- to be removed in next major version
      // we have to recreate the old "diff" object
      await global.kuzzle.pipe('core:hotelClerk:addSubscription', {
        changed: subscribed,
        collection,
        connectionId: request.context.connection.id,
        filters: normalized.filter,
        index,
        roomId: normalized.id,
      });
    }

    const subscription = new Subscription(
      index,
      collection,
      request.input.body,
      normalized.id,
      request.context.connection.id,
      request.context.user);

    global.kuzzle.emit('core:realtime:user:subscribe:after', subscription);

    return {
      channel,
      roomId: normalized.id,
    };
  }

  /**
   * Returns the list of collections of an index with realtime rooms.
   */
  listCollections (index: string): string[] {
    return getCollections(this.koncorde, index);
  }

  /**
   * Joins an existing realtime room.
   *
   * The room may exists on another cluster node, if it's the case, the normalized
   * filters will be fetched from the cluster.
   */
  async join (request: KuzzleRequest): Promise<{ channel, roomId }> {
    const roomId = request.input.body.roomId;

    if (! this.rooms.has(roomId)) {
      const normalized: NormalizedFilter = await global.kuzzle.ask(
        'cluster:realtime:filters:get',
        roomId);

      if (! normalized) {
        throw realtimeError.get('room_not_found', roomId);
      }

      this.createRoom(normalized);
    }

    const { channel, cluster, subscribed } = await this.subscribeToRoom(roomId, request);

    if (cluster && subscribed) {
      global.kuzzle.emit('core:realtime:subscribe:after', roomId);
    }

    return {
      channel,
      roomId,
    };
  }

  /**
   * Return the list of index, collection, rooms and subscribing connections
   * on all index/collection pairs that the requesting user is allowed to
   * subscribe.
   */
  async list (user: User): Promise<RoomList> {
    // We need the room list from the cluster's full state, NOT the one stored
    // in Koncorde: the latter also contains subscriptions created by the
    // framework (or by plugins), and we don't want those to appear in the API
    const fullStateRooms: RoomList = await global.kuzzle.ask('cluster:realtime:room:list');

    const isAllowedRequest = new KuzzleRequest({
      action: 'subscribe',
      controller: 'realtime',
    }, {});

    for (const [index, collections] of Object.entries(fullStateRooms)) {
      isAllowedRequest.input.resource.index = index;

      // Filter out collections the user is not allowed to subscribe to.
      const toRemove = await Bluebird.filter(
        Object.keys(collections),
        collection => {
          isAllowedRequest.input.resource.collection = collection;

          return ! user.isActionAllowed(isAllowedRequest);
        });

      for (const collection of toRemove) {
        delete fullStateRooms[index][collection];
      }
    }

    return fullStateRooms;
  }

  /**
   * Removes a connections and unsubscribe it from every subscribed rooms.
   *
   * Usually called when an user has been disconnected from Kuzzle.
   */
  async removeConnection (connectionId: string, notify = true): Promise<void> {
    const connectionRooms = this.subscriptions.get(connectionId);

    if (! connectionRooms) {
      // No need to raise an error if the connection does not have room subscriptions
      return;
    }

    // Individual unsubscribe failures are logged, not propagated, so one bad
    // room cannot prevent cleanup of the others.
    await Bluebird.map(connectionRooms.roomIds, (roomId: string) => (
      this.unsubscribe(connectionId, roomId, notify).catch(global.kuzzle.log.error)
    ));
  }

  /**
   * Clear all connections made to this node:
   *  - trigger appropriate core events
   *  - send user exit room notifications
   */
  async clearConnections (): Promise<void> {
    await Bluebird.map(this.subscriptions.keys(), (connectionId: string) => (
      this.removeConnection(connectionId, false)
    ));
  }

  /**
   * Register a new subscription
   *  - save the subscription on the provided room with volatile data
   *  - add the connection to the list of active connections of the room
   */
  private registerSubscription (connectionId: string, roomId: string, volatile: JSONObject): void {
    debug('Add room %s for connection %s', roomId, connectionId);

    let connectionRooms = this.subscriptions.get(connectionId);

    if (! connectionRooms) {
      connectionRooms = new ConnectionRooms();
      this.subscriptions.set(connectionId, connectionRooms);
    }

    connectionRooms.addRoom(roomId, volatile);

    // Callers must have created the room first (createRoom) — this would throw
    // a TypeError otherwise.
    this.rooms.get(roomId).addConnection(connectionId);
  }

  /**
   * Create new room if needed
   *
   * @returns {void}
   */
  private createRoom (normalized: NormalizedFilter): void {
    const { index: koncordeIndex, id: roomId } = normalized;
    const { index, collection } = fromKoncordeIndex(koncordeIndex);

    if (this.rooms.has(normalized.id)) {
      return;
    }

    const roomsLimit = global.kuzzle.config.limits.subscriptionRooms;

    if (roomsLimit > 0 && this.roomsCount >= roomsLimit) {
      throw realtimeError.get('too_many_rooms');
    }

    this.koncorde.store(normalized);

    global.kuzzle.emit('core:realtime:room:create:after', normalized);

    // @deprecated -- to be removed in the next major version of kuzzle
    global.kuzzle.emit('room:new', { collection, index, roomId });

    /*
      In some very rare cases, the room may have been created between the
      beginning of the function executed at the end of normalize, and this one

      Before incrementing the rooms count, we have to make sure this is not
      the case to ensure our counter is right
     */
    if (this.newRoom(index, collection, roomId)) {
      this.roomsCount++;
    }
  }

  /**
   * Remove a connection from a room.
   *
   * Also delete the rooms if it was the last connection subscribing to it.
   *
   * Note: the room object is captured BEFORE a possible removeRoom() so the
   * outgoing notification and deprecated pipes can still read its metadata.
   */
  async unsubscribe (connectionId: string, roomId: string, notify = true) {
    const connectionRooms = this.subscriptions.get(connectionId);
    const requestContext = new RequestContext({
      connection: { id: connectionId }
    });

    if (! connectionRooms) {
      throw realtimeError.get('not_subscribed', connectionId, roomId);
    }

    const volatile = connectionRooms.getVolatile(roomId);

    if (volatile === undefined) {
      throw realtimeError.get('not_subscribed', connectionId, roomId);
    }

    if (connectionRooms.count > 1) {
      connectionRooms.removeRoom(roomId);
    }
    else {
      // last room for this connection: drop the whole entry
      this.subscriptions.delete(connectionId);
    }

    const room = this.rooms.get(roomId);

    if (! room) {
      global.kuzzle.log.error(`Cannot remove room "${roomId}": room not found`);
      throw realtimeError.get('room_not_found', roomId);
    }

    for (const channel of Object.keys(room.channels)) {
      global.kuzzle.entryPoint.leaveChannel(channel, connectionId);
    }

    room.removeConnection(connectionId);

    if (room.size === 0) {
      await this.removeRoom(roomId);
    }

    // even if the room is deleted for this node, another one may need the
    // notification
    const request = new Request(
      {
        action: 'unsubscribe',
        collection: room.collection,
        controller: 'realtime',
        index: room.index,
        volatile,
      },
      requestContext);

    await this.module.notifier.notifyUser(roomId, request, 'out', {
      count: room.size
    });

    // Do not send an unsubscription notification if the room has been destroyed
    // because the other nodes already had destroyed it in the full state
    if ( notify
      && this.rooms.has(roomId)
      && room.channels.size > 0
    ) {
      await global.kuzzle.pipe('core:realtime:unsubscribe:after', roomId);

      // @deprecated -- to be removed in next major version
      await global.kuzzle.pipe('core:hotelClerk:removeRoomForCustomer', {
        requestContext,
        room: {
          collection: room.collection,
          id: roomId,
          index: room.index,
        },
      });
    }

    const kuid = global.kuzzle.tokenManager.getKuidFromConnection(connectionId);

    const subscription = new Subscription(
      room.index,
      room.collection,
      undefined,
      roomId,
      connectionId,
      { _id: kuid });

    global.kuzzle.emit('core:realtime:user:unsubscribe:after', {
      /* @deprecated */
      requestContext,
      /* @deprecated */
      room: {
        collection: room.collection,
        id: roomId,
        index: room.index,
      },
      subscription,
    });
  }

  /**
   * Returns inner metrics from the HotelClerk
   */
  metrics (): {rooms: number, subscriptions: number} {
    return {
      rooms: this.roomsCount,
      subscriptions: this.subscriptions.size,
    };
  }

  /**
   * Deletes a room if no user has subscribed to it, and removes it also from the
   * real-time engine
   */
  private async removeRoom (roomId: string): Promise<void> {
    this.roomsCount--;
    this.rooms.delete(roomId);

    // We have to ask the cluster to dispatch the room removal event.
    // The cluster will also remove the room from Koncorde if no other node
    // uses it.
    // (this node may have no subscribers on it, but other nodes might)
    await global.kuzzle.ask('cluster:realtime:room:remove', roomId);

    // @deprecated -- to be removed in the next major version
    try {
      await global.kuzzle.pipe('room:remove', roomId);
    }
    catch (e) {
      // deliberate best-effort: errors from this deprecated pipe are ignored
      return;
    }
  }

  /**
   * Subscribes a connection to an existing room.
   *
   * The subscription is made on a configuration channel who will be created
   * on the room if it does not already exists.
   *
   * NOTE(review): `room` is read unconditionally below — callers are expected
   * to have created the room first (createRoom/join do so) — TODO confirm.
   */
  private async subscribeToRoom (
    roomId: string,
    request: KuzzleRequest
  ): Promise<{ channel: string, cluster: boolean, subscribed: boolean }> {
    let subscribed = false;
    let notifyPromise;

    const { scope, users, propagate } = request.input.args;
    const connectionId = request.context.connection.id;

    const channel = new Channel(roomId, { propagate, scope, users });
    const connectionRooms = this.subscriptions.get(connectionId);
    const room = this.rooms.get(roomId);

    if (! connectionRooms || ! connectionRooms.hasRoom(roomId)) {
      subscribed = true;
      this.registerSubscription(connectionId, roomId, request.input.volatile);

      notifyPromise = this.module.notifier.notifyUser(roomId, request, 'in', {
        count: room.size
      });
    }
    else {
      // already subscribed to this room: only the channel join is performed
      notifyPromise = Bluebird.resolve();
    }

    global.kuzzle.entryPoint.joinChannel(channel.name, connectionId);

    room.createChannel(channel);

    await notifyPromise;

    return {
      channel: channel.name,
      cluster: channel.cluster,
      subscribed,
    };
  }

  /**
   * Create an empty room in the RAM cache if it doesn't exists
   *
   * @returns True if a new room has been created
   */
  private newRoom (index: string, collection: string, roomId: string): boolean {
    if (! this.rooms.has(roomId)) {
      this.rooms.set(roomId, new Room(roomId, index, collection));

      return true;
    }

    return false;
  }
}
Al-Bab Name Al-Bāb in Arabic means the door. According to Arab geographer Yaqut al-Hamawi in 1226, the name is a shortening of Bāb Bizāʻah (the gate to Bizāʻah). Bizāʻah (also Buzāʻah and Bzāʻā) is a town located about 10 kilometres (6.2 miles) east of Al-Bāb. History During the Roman Empire, Al-Bab was a civitas of the Roman Province of Syria, known as Batnai. The ruins of that settlement lie on the banks of the wadi 1 kilometre (0.62 mi) north of the modern town. Roman Batnai should not be confused with the Roman town Batnae about 70 kilometres (43 mi) northeast. Al-Bab was conquered by the Arab army of the Rashidun Caliphate under caliph Umar ibn al-Khattab in the 7th Century. It received its name, meaning "the Gate", during Islamic rule as it served as "the gate" between Aleppo and the adjacent town of Buza'ah. The tomb and shrine of Aqil ibn Abi Talib (the brother of Ali) was located in al-Bāb. Until its rule by the Ayyubids in the 13th century, the town was populated mostly by Shias of the Ismaili sect. According to Yaqut al-Hamawi in 1226, it was a small town in the district of Aleppo. In the town were markets filled with cotton products called kirbas which were exported to Damascus and Egypt. The fourteenth-century historian Abu'l-Fida writes that al-Bab was a small town with a market, a bath, pleasant gardens, and a mosque (the Great Mosque of al-Bab). The fortunes of Al-Bab were shared with that of Aleppo when that city was conquered by the Ottoman Turks in 1516, and was administered as part of the Eyalet of Aleppo until 1866 and the Vilayet of Aleppo until January 1919, when the district was occupied by French troops and attached to the State of Aleppo within the French mandate of Syria. Syrian Civil War Until April 2012, Al-Bab had been relatively unscathed by the Syrian civil war. Between mid-May and mid-July, some 15 rebel groups formed within the city. 
The fight for Al-Bab included a series of raids and assaults on government offices over the course of two months, finally culminating on 18 July when rebels seized the final government stronghold within the city limits. According to opposition activists, an army garrison remained outside al-Bab and shelled the insurgents' positions. Rebel forces pushed the army from this garrison on the south edge of town on 29 July. With the seizure of al-Bab, the insurgents in northern Aleppo gained considerable momentum. The city's capture gave the militants full control of the areas northeast of Aleppo. However, in the summer of 2013 Islamic State of Iraq and the Levant had a presence in the town and by mid November, 2013, was in full control of Al-Bab. After the capture of Manbij by the Syrian Democratic Forces in August 2016, al-Bab was reported to be the next objective of the Syrian Democratic Forces (SDF) campaign. In December 2016, al-Bab came under an attack by Syrian rebels backed by Turkey. Turkish air strikes on December 21 destroyed 67 Islamic State targets; 59 Turkish soldiers and over 200 rebels were reported killed. Al-Bab was a strategically important town for Turkey because it did not want the two SDF regions to link up. On 23 February 2017, al-Bab was captured by Turkish-backed rebels, becoming a part of the Turkish buffer zone.
"""Packaging configuration for the ``hive-progress-bar`` distribution."""
import setuptools

# The PyPI long description is the project README, used verbatim.
with open("README.md", "r") as readme_file:
    readme_contents = readme_file.read()

setuptools.setup(
    name="hive-progress-bar",
    version="0.0.4",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A small progress bar for iterating over a collection and showing progress.",
    long_description=readme_contents,
    long_description_content_type="text/markdown",
    url="https://github.com/hive-one/hive-progress-bar",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
Anthony Crossley Anthony Crommelin Crossley (13 August 1903 – 15 August 1939) was a British writer, publisher and Conservative politician. Crossley was born on 13 August 1903, the only son of Sir Kenneth Irwin Crossley, 2nd Baronet. His father was chairman of Crossley Brothers Limited and Crossley Motors Limited. In 1916 Crossley enrolled at Eton College, completing his education at Magdalen College, Oxford. His flair for writing both poetry and prose led to his becoming a partner in the publishing house of Christopher's from 1928 to 1935. In 1927 he married Clare Thomson, daughter of Brigadier A F Thomson, and had two daughters and one son. Anthony Crossley died, aged 36, when the aircraft in which was travelling crashed into the sea off the Danish coast on 15 August 1939. Publications Crossley published three books of poetry: Aucassin and Nicolette and Other Poems, Prophets, Gods and Witches and Tragedy under Lucifer. His prose works showed his other interests: The History of Eton College Hunt, Chin Wag: The War Records of the Eton Manor Club and The Floating Line for Salmon and Sea Trout. Political career In 1931 Crossley was elected one of two Conservative Members of Parliament (MPs) for the two-seat Oldham borough constituency. At the next election in 1935 he was elected as MP for Stretford in south east Lancashire. He remained MP for the area until his death in 1939.
<filename>src/ilpa_cplex.hpp<gh_stars>0
#ifndef ILP_ABSTRACTION_CPLEX_HPP
#define ILP_ABSTRACTION_CPLEX_HPP

// CPLEX backend for the generic ILP abstraction layer declared in
// common.hpp.  This header only declares the interface; the matching
// definitions live in ilpa_cplex.cpp, which is textually included at the
// bottom of this file (header-only usage).
//
// NOTE(review): assert() is used by the operator overloads below, but
// <cassert> is not included directly — presumably it arrives transitively
// via common.hpp or the CPLEX headers; confirm and include it explicitly.

#include "common.hpp"

#include <limits>

// IL_STD must be defined before any CPLEX header is included so that the
// Concert Technology headers compile in ISO C++ (std::-namespace) mode.
#define IL_STD 1
#include <ilcplex/ilocplex.h>

#include <string>
#include <unordered_map>

namespace ilpabstraction {

namespace cplex_internal {

// Read-only view of solver progress handed to user callbacks during a
// solve.  All queries are forwarded to the IloCplex::MIPInfoCallbackI
// instance that CPLEX is currently invoking.
class CallbackContext
{
public:
  inline CallbackContext(IloCplex::MIPInfoCallbackI * cplex_cb);

  inline double get_objective_value() const; // incumbent objective value
  inline double get_bound() const;           // best proven bound
  inline double get_gap() const;             // current MIP gap
  inline double get_time() const;            // elapsed solve time
  inline int get_processed_nodes() const;
  inline int get_open_nodes() const;

private:
  // Non-owning: the callback object itself is managed by CPLEX.
  IloCplex::MIPInfoCallbackI * cplex_cb;
};

} // namespace cplex_internal

// Variable handle.  Inherits all behavior from IloNumVar and only adds
// identity comparison via the Concert extractable id.
class CPLEXVariable : public IloNumVar
{
public:
  using IloNumVar::IloNumVar;

  bool
  operator==(const CPLEXVariable & other) const
  {
    return other.getId() == this->getId();
  }
};

// Expression handle: a plain IloExpr under the abstraction-layer name.
class CPLEXExpression : public IloExpr
{
public:
  using IloExpr::IloExpr;
};

// A two-sided constraint (lower <= expr <= upper) is stored as two IloRange
// halves — presumably so that each bound can be modified independently via
// change_constraint_lb() / change_constraint_ub() below; confirm against
// ilpa_cplex.cpp.
using CPLEXConstraint = std::pair<IloRange, IloRange>;

// Concrete Interface implementation backed by IBM ILOG CPLEX.
class CPLEXInterface
    : public Interface<CPLEXVariable, CPLEXExpression, CPLEXConstraint,
                       cplex_internal::CallbackContext>
{
public:
  using Base = Interface<CPLEXVariable, CPLEXExpression, CPLEXConstraint,
                         cplex_internal::CallbackContext>;
  using Callback = Base::Callback;
  using CallbackContext = Base::CallbackContext;

  static constexpr const char * NAME = "CPLEX";

  // CPLEX's representation of +/- infinity, for variable/constraint bounds.
  static constexpr const auto INFTY = +IloInfinity;
  static constexpr const auto NEGATIVE_INFTY = -IloInfinity;

  class Model : public Base::Model
  {
  public:
    // Adds the constraint lower_bound <= expr <= upper_bound.  Bounds are
    // templated so both numeric values and expressions can be passed.
    template <class LowerValType, class UpperValType>
    inline Constraint add_constraint(LowerValType lower_bound, Expression expr,
                                     UpperValType upper_bound,
                                     std::string name = "");

    template <class LowerValType, class UpperValType>
    inline Variable add_var(VariableType type, LowerValType lower_bound,
                            UpperValType upper_bound, std::string name = "");

    // SOS type-1 constraint over <vars>; <weights> orders the set members.
    inline void add_sos1_constraint(const std::vector<Variable> & vars,
                                    const std::vector<double> & weights,
                                    std::string name = "");

    inline void commit_variables();

    inline void set_objective(Expression expr, ObjectiveType type);

    inline void solve();

    inline double get_variable_assignment(const Variable & var) const;
    inline double get_objective_value() const;
    inline double get_bound() const;

    inline unsigned int get_variable_count();
    inline unsigned int get_constraint_count();
    inline unsigned int get_nonzero_count();

    inline ModelStatus get_status() const;
    inline bool has_feasible() const;

    template <class T>
    void set_param(ParamType type, T val);

    // Sets a solver-specific parameter directly, bypassing the generic
    // ParamType mapping.
    template <class SolverParamType, class T>
    void set_param_passthrough(SolverParamType type, T val);

    // Records a warm-start value for <var>; see start_values /
    // apply_start_solution() below.
    template <class T>
    void set_start(Variable & var, T val);

    template <class LowerValType, class UpperValType>
    void change_var_bounds(Variable & var, LowerValType lower_bound,
                           UpperValType upper_bound);

    template <class UpperValType>
    void change_constraint_ub(Constraint & constr, UpperValType upper_bound);
    template <class LowerValType>
    void change_constraint_lb(Constraint & constr, LowerValType lower_bound);

    inline void change_objective_coefficient(Variable & var,
                                             double coefficient);

    // Model / solution export to file.
    inline void write(const std::string & filename);
    inline void write_solution(const std::string & filename);

    IloObjective objective;

    inline ~Model();

    // Condition-number (kappa) statistics; advertised via features() below.
    inline void enable_kappa_statistics();
    inline KappaStats kappa_stats();

  protected:
    // Adapts CPLEX's MIPInfoCallback mechanism to the abstraction-layer
    // Callback registered on the owning Model.
    class CallbackAdapter : public IloCplex::MIPInfoCallbackI
    {
    public:
      inline static IloCplex::Callback create(IloEnv env, Model * model);
      inline virtual IloCplex::CallbackI * duplicateCallback() const override;

    protected:
      inline virtual void main() override;

    private:
      inline CallbackAdapter(IloEnv env, Model * model);

      Model * model; // non-owning back-pointer
    };

    inline Model(CPLEXInterface * interface);

    CPLEXInterface * interface; // non-owning pointer to the creating interface
    IloCplex::Callback cba;

    friend class CPLEXInterface;

    IloCplex cplex;
    IloModel m;
    ModelStatus status;
    // Presumably tracks whether <cplex> currently reflects <m>, refreshed
    // by extract() — TODO confirm against the definitions in ilpa_cplex.cpp.
    bool cplex_up_to_date;

    inline void extract();
    inline void apply_start_solution();

    // Warm-start values, keyed by variable id (see set_start()).
    std::unordered_map<long, IloNum> start_values;
  };

  inline CPLEXInterface(bool auto_commit_variables);
  inline ~CPLEXInterface();

  inline Model create_model();
  inline Expression create_expression();
  inline Variable create_variable();

  static constexpr auto
  features()
  {
    return Features::FeatureList<Features::KAPPA_STATS>{};
  }

private:
  // One Concert environment shared by everything this interface creates.
  IloEnv env;
};

} // namespace ilpabstraction

/*
 * CPLEX thought it prudent to only define operator* between its variables
 * and int, not unsigned int.  This leads to ambiguous overloads when
 * multiplying by unsigned int, since there are a myriad of overloads which
 * could be converted to from unsigned int.  Fix this by giving explicit
 * overloads for this case.
 */
inline auto
operator*(const IloNumVar lhs, unsigned int rhs)
{
  assert(rhs < static_cast<unsigned int>(std::numeric_limits<int>::max()));
  return lhs * static_cast<int>(rhs);
}

inline auto
operator*(unsigned int lhs, IloNumVar rhs)
{
  assert(lhs < static_cast<unsigned int>(std::numeric_limits<int>::max()));
  return static_cast<int>(lhs) * rhs;
}

inline auto
operator*(ilpabstraction::CPLEXVariable & var, unsigned int i)
{
  // NOTE(review): unsigned-vs-signed comparison here; the two overloads
  // above use an explicit static_cast instead — consider unifying.
  assert(i <= std::numeric_limits<int>::max());
  return (IloNumVar)var * (int)i;
}

inline auto
operator*(unsigned int i, ilpabstraction::CPLEXVariable & var)
{
  assert(i <= std::numeric_limits<int>::max());
  return (IloNumVar)var * (int)i;
}

inline auto
operator*(ilpabstraction::CPLEXVariable & var, int i)
{
  // NOTE(review): for a signed int this assert is trivially true.
  assert(i <= std::numeric_limits<int>::max());
  return (IloNumVar)var * i;
}

inline auto
operator*(int i, ilpabstraction::CPLEXVariable & var)
{
  return (IloNumVar)var * i;
}

inline auto
operator*(ilpabstraction::CPLEXVariable & var, double d)
{
  return (IloNumVar)var * d;
}

inline auto
operator*(double d, ilpabstraction::CPLEXVariable & var)
{
  return (IloNumVar)var * d;
}

inline auto
operator*(unsigned int i, ilpabstraction::CPLEXExpression & expr)
{
  assert(i <= std::numeric_limits<int>::max());
  return (IloExpr)expr * (int)i;
}

inline auto
operator*(ilpabstraction::CPLEXExpression & expr, unsigned int i)
{
  assert(i <= std::numeric_limits<int>::max());
  return (IloExpr)expr * (int)i;
}

inline auto
operator*(double d, ilpabstraction::CPLEXExpression & expr)
{
  return (IloExpr)expr * d;
}

inline auto
operator*(ilpabstraction::CPLEXExpression & expr, double d)
{
  return (IloExpr)expr * d;
}

inline auto
operator*(unsigned int i, const IloNumLinExprTerm & expr)
{
  assert(i <= std::numeric_limits<int>::max());
  return expr * (int)i;
}

inline auto
operator*(const IloNumLinExprTerm & expr, unsigned int i)
{
  assert(i <= std::numeric_limits<int>::max());
  return expr * (int)i;
}

inline auto
operator<=(unsigned int i, const IloNumExprArg & expr)
{
  assert(i <= std::numeric_limits<int>::max());
  return (int)i <= expr;
}

inline auto
operator<=(const IloNumExprArg & expr, unsigned int i)
{
  assert(i <= std::numeric_limits<int>::max());
  return expr <= (int)i;
}

// Header-only build: pull in the definitions unless the .cpp is being
// compiled standalone.
#ifndef ILP_ABSTRACTION_CPLEX_CPP
#include "ilpa_cplex.cpp"
#endif

#endif // ILP_ABSTRACTION_CPLEX_HPP
Q: How late can I purchase Deutsche Bahn savings fare? I know I will be making a trip on a particular day, but I don't know exactly what time; yet, I still want to save money by purchasing the savings fare. How close to departure time of a train can I still purchase the savings fare ticket? Update: Yes, I'm referring to Deutsche Bahn. However, the tickets are for Euro Night. A: First, as Gilles pointed out there's likely a limit to how many on each train have this rate, so if you wait too long you risk missing out. However, assuming they're available, this link would appear to have the required information, for saving fares: Availabilty: From 91 days up to one day before travel Note that they also point out what we've suggested, that: Please note that the tickets are subject to availability. A: Availability is much more of an issue than any official limit. Basically, the Sparpreis offers exist in order to allow price discrimination: get people who care more about convenience than money to pay more, without losing customers that can't/won't afford the full price. So they're designed to run out well before the day the train runs and the sooner you buy, the cheaper it will be. However, if it is an expensive ticket, there may be a semi-workaround: The cancellation fee for Sparpreis tickets is only 15 EUR before the day of the trip. So even if you're not sure about the day and time, it may still be worthwhile to buy a ticket for the most likely time (or even more than one) as soon as possible, and cancel it if your plan turns out to be different. 15 EUR is typically much less than the money you save by getting an early Sparpreis ticket. A: Not too long ago, you needed to book a Sparpreis (savings fare) ticket three calendar days in advance. Then it changed to one day. Now, the official site explicitly says: The "Sparpreis" (saver fare for Germany) is for sale 91 days before intended travel date up until shortly before departure. 
(Emphasis mine) I personally haven’t tried getting a ticket that late, and remember that they are contingented, so they may be sold out especially for popular connections, but it seems that they are bookable until a few minutes before departure.
. This case report of a 31-year-old primiparous woman discusses the importance of early diagnosis and treatment of acute fatty liver in pregnancy (AFLP) to prevent maternal or/and foetal death. AFLP is a rare disease, which is characterised by maternal liver dysfunction and/or failure. Because of similar symptoms and laboratory findings AFLP may mimic and is often mistaken for 1) the more frequent HELLP syndrome with haemolysis, elevated liver enzymes and low platelet levels and 2) and severe pre-eclampsia.
// Unit tests for AvlTree: insertion (covering all four rebalancing
// rotations), deletion (leaf / one-child / two-children / root cases),
// ordered lookup and traversal helpers, and a randomized stress test.
// checkTree() validates the AVL invariants after each mutation.
import { AvlTree } from './avl-tree';
import { AvlTreeNode } from './avl-tree-node';
import { checkTree } from './avl-tree-utils';

// Flip to true to dump tree shapes while debugging a failing case.
const PRINT_TREES = false;

describe('AvlTree', () => {
  it('initializes with zero size', () => {
    const tree = new AvlTree<number, string>();
    expect(tree.size).toBe(0);
  });

  describe('insert', () => {
    it('returns the tree node if inserted', () => {
      const tree = new AvlTree<number, string>();
      const node = tree.insert(100, 'Hundred');
      expect(node).toBeDefined();
      expect(node?.key).toBe(100);
      expect(node?.value).toBe('Hundred');
      expect(tree.size).toBe(1);
    });

    it('returns undefined if nothing could be inserted', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, 'Hundred');
      // Duplicate key: insert must be rejected and size stay unchanged.
      const node = tree.insert(100, 'Hundred');
      expect(node).toBeUndefined();
      expect(tree.size).toBe(1);
    });

    it('increments the size', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, 'Hundred');
      tree.insert(101, 'Hundred and one');
      tree.insert(102, 'Hundred and two');
      tree.insert(99, 'Ninety-nine');
      tree.insert(150, 'Hundred and fifty');
      tree.insert(200, 'Two hundred');
      tree.insert(130, 'Hundred and thirty');
      expect(tree.size).toBe(7);
      expect(tree.get(100)).toBe('Hundred');
      expect(tree.get(101)).toBe('Hundred and one');
      expect(tree.get(102)).toBe('Hundred and two');
      expect(tree.get(130)).toBe('Hundred and thirty');
      expect(tree.get(150)).toBe('Hundred and fifty');
      expect(tree.get(200)).toBe('Two hundred');
      expect(tree.get(99)).toBe('Ninety-nine');
      expect(tree.get(98)).toBeUndefined();
      expect(tree.get(97)).toBeUndefined();
    });

    it('performs right rotation if necessary', () => {
      /**
       *        100   Rotate right      50
       *       /   \                   /  \
       *      50   150    ====>      25    100
       *     /  \                   /     /   \
       *    25   75               12    75    150
       *   /
       *  12 (new)
       */
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(25, 25);
      tree.insert(75, 75);
      tree.insert(12, 12); // should trigger rotation
      const root = tree.root;
      expect(root?.value).toBe(50);
      expect(root?.left?.key).toBe(25);
      expect(root?.left?.left?.key).toBe(12);
      expect(root?.right?.key).toBe(100);
      expect(root?.right?.left?.key).toBe(75);
      expect(root?.right?.right?.key).toBe(150);
    });

    it('performs left-right rotation if necessary (case 1)', () => {
      /**
       *      100    Rotate left    100*   Rotate right     75
       *     /   \                 /   \                   /  \
       *    50*   150   ====>     75    150     ===>     50    100
       *   /  \                  /  \                   /     /   \
       *  25   75              50    80               25    80    150
       *        \             /
       *         80 (new)   25
       */
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(25, 25);
      tree.insert(75, 75);
      tree.insert(80, 80); // should trigger rotation
      const root = tree.root;
      expect(root?.value).toBe(75);
      expect(root?.left?.key).toBe(50);
      expect(root?.left?.left?.key).toBe(25);
      expect(root?.right?.key).toBe(100);
      expect(root?.right?.left?.key).toBe(80);
      expect(root?.right?.right?.key).toBe(150);
    });

    it('performs left-right rotation if necessary (case 2)', () => {
      /**
       *      100    Rotate left    100*   Rotate right     75
       *     /   \                 /   \                   /  \
       *    50*   150   ====>     75    150     ===>     50    100
       *   /  \                  /                      /  \      \
       *  25   75              50                     25    60    150
       *      /               /  \
       *     60(new)        25    60
       */
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(25, 25);
      tree.insert(75, 75);
      tree.insert(60, 60); // should trigger rotation
      const root = tree.root;
      expect(root?.value).toBe(75);
      expect(root?.left?.key).toBe(50);
      expect(root?.left?.left?.key).toBe(25);
      expect(root?.left?.right?.key).toBe(60);
      expect(root?.right?.key).toBe(100);
      expect(root?.right?.right?.key).toBe(150);
    });

    it('performs left rotation if necessary', () => {
      /**
       *      100*   Rotate left       150
       *     /   \                    /   \
       *    50   150      ====>    100     200
       *        /   \             /   \      \
       *      125    200        50    125     300
       *               \
       *                300 (new)
       */
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(125, 125);
      tree.insert(200, 200);
      tree.insert(300, 300); // should trigger rotation
      const root = tree.root;
      expect(root?.value).toBe(150);
      expect(root?.left?.key).toBe(100);
      expect(root?.left?.left?.key).toBe(50);
      expect(root?.left?.right?.key).toBe(125);
      expect(root?.right?.key).toBe(200);
      expect(root?.right?.right?.key).toBe(300);
    });

    it('performs right-left rotation if necessary', () => {
      /**
       *      100   Rotate right    100*   Rotate left      125
       *     /   \                 /   \                   /   \
       *    50   150*   ====>    50    125    =====>    100     150
       *        /   \                 /  \             /   \      \
       *      125    200            110   150        50    110    200
       *      /                            \
       *    110(new)                        200
       */
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(125, 125);
      tree.insert(200, 200);
      tree.insert(110, 110); // should trigger rotation
      const root = tree.root;
      expect(root?.value).toBe(125);
      expect(root?.left?.key).toBe(100);
      expect(root?.left?.left?.key).toBe(50);
      expect(root?.left?.right?.key).toBe(110);
      expect(root?.right?.key).toBe(150);
      expect(root?.right?.right?.key).toBe(200);
    });
  });

  describe('delete', () => {
    it('removes a leaf node', () => {
      /**
       * Make a tree to start with:
       *
       *        40
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10   30 50  70
       *  /
       * 5
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(10, 10);
      tree.insert(50, 50);
      tree.insert(30, 30);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(70, 70);
      tree.insert(5, 5);
      // First remove a leaf node
      expect(tree.delete(30)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      expect(tree.keyList()).toEqual([5, 10, 20, 40, 50, 60, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        40     Rotate right      40
       *       /  \                     /  \
       *     20*   60     ===>        10    60
       *    /     /  \               /  \  /  \
       *  10    50    70            5   20 50  70
       *  /
       * 5
       */
    });

    it('removes a root node when left-heavy and predecessor has left child', () => {
      /**
       * Make a tree to start with:
       *
       *        40*
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10  30p 50  70
       *      /
       *   25pl
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(70, 70);
      tree.insert(25, 25);
      // Remove the root node
      expect(tree.delete(40)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      expect(tree.keyList()).toEqual([10, 20, 25, 30, 50, 60, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        30p
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10 25pl 50  70
       */
    });

    it('removes a root node when left-heavy and predecessor has no left child', () => {
      /**
       * Make a tree to start with:
       *
       *        40*
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10  30p 50  70
       *   \
       *    15
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(70, 70);
      tree.insert(15, 15);
      // Remove the root node
      expect(tree.delete(40)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      // expect(tree.keys()).toEqual([10, 15, 20, 30, 50, 60, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        30
       *       /  \
       *     15    60
       *    /  \  /  \
       *  10   20 50  70
       */
    });

    it('removes a root node when right-heavy', () => {
      /**
       * Make a tree to start with:
       *
       *        40*
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10   30 50  70
       *           \
       *            55
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(70, 70);
      tree.insert(55, 55);
      // Remove the root node
      expect(tree.delete(40)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      expect(tree.keyList()).toEqual([10, 20, 30, 50, 55, 60, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        50
       *       /  \
       *     20    60
       *    /  \  /  \
       *  10   30 55  70
       */
    });

    it('removes a node with 2 children where its predecessor is its direct child', () => {
      const items = [4, 2, 6, 1, 3, 5, 9, 0];
      const tree = new AvlTree<number, number>();
      for (const item of items) {
        tree.insert(item, item);
      }
      /**
       * Results in tree:
       *        4
       *       / \
       *      2*  6
       *     / \ / \
       *   1p  3 5  9
       *   /
       * 0pl
       *
       * Remove 2:
       *        4
       *       / \
       *      1p  6
       *     / \ / \
       *  0pl  3 5  9
       */
      if (PRINT_TREES) {
        tree.print();
      }
      const removed = tree.delete(2);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(removed).toBe(true);
      expect(tree.size).toBe(7);
      expect(tree.toJSON()).toMatchSnapshot();
    });

    it('removes an intermediate node when left-heavy and predecessor has no right child', () => {
      /**
       * Make a tree to start with:
       *
       *        40
       *       /  \
       *     20    60*
       *    /  \  /  \
       *  10   30 50  70
       *           \
       *            55(pred)
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(70, 70);
      tree.insert(55, 55);
      if (PRINT_TREES) {
        tree.print();
      }
      // Remove the root node
      expect(tree.delete(60)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      expect(tree.keyList()).toEqual([10, 20, 30, 40, 50, 55, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        40
       *       /  \
       *     20    55
       *    /  \  /  \
       *  10   30 50  70
       */
    });

    it('removes an intermediate node when right-heavy and successor has no right child', () => {
      /**
       * Make a tree to start with:
       *
       *        40
       *       /  \
       *     20    60*
       *    /  \  /  \
       *  10   30 50  70
       *             /
       *           65
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(70, 70);
      tree.insert(65, 65);
      if (PRINT_TREES) {
        tree.print();
      }
      // Remove the root node
      expect(tree.delete(60)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(7);
      expect(tree.keyList()).toEqual([10, 20, 30, 40, 50, 65, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        40
       *       /  \
       *     20    65
       *    /  \  /  \
       *  10   30 50  70
       */
    });

    it('removes a node that only has a left child', () => {
      /**
       * Make a tree to start with:
       *
       *        40
       *       /  \
       *     20    60*
       *    /  \  /
       *  10   30 50
       *      /
       *    25
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(50, 50);
      tree.insert(25, 25);
      if (PRINT_TREES) {
        tree.print();
      }
      // Remove the root node
      expect(tree.delete(60)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(6);
      expect(tree.keyList()).toEqual([10, 20, 25, 30, 40, 50]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        30
       *       /  \
       *     20    40
       *    /  \     \
       *  10   25     50
       */
    });

    it('removes a root node that only has a left child', () => {
      /**
       * Make a tree to start with:
       *
       *    1*
       *   /
       *  0
       */
      const tree = new AvlTree<number, number>();
      tree.insert(1, 1);
      tree.insert(0, 0);
      if (PRINT_TREES) {
        tree.print();
      }
      // Remove the root node
      expect(tree.delete(1)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(1);
      expect(tree.keyList()).toEqual([0]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation: only the left child remains as the new root.
       *
       *  0
       */
    });

    it('removes a node that only has a right child', () => {
      /**
       * Make a tree to start with:
       *
       *        40
       *       /  \
       *     20    60*
       *    /  \     \
       *  10   30     70
       *      /
       *    25
       */
      const tree = new AvlTree<number, number>();
      tree.insert(40, 40);
      tree.insert(20, 20);
      tree.insert(60, 60);
      tree.insert(10, 10);
      tree.insert(30, 30);
      tree.insert(70, 70);
      tree.insert(25, 25);
      if (PRINT_TREES) {
        tree.print();
      }
      // Remove the root node
      expect(tree.delete(60)).toBe(true);
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.size).toBe(6);
      expect(tree.keyList()).toEqual([10, 20, 25, 30, 40, 70]);
      expect(tree.toJSON()).toMatchSnapshot();
      /**
       * Expected situation:
       *
       *        30
       *       /  \
       *     20    40
       *    /  \     \
       *  10   25     70
       */
    });

    it('removes a balanced internal node', () => {
      /**
       * Tree:
       *    12
       *   /  \
       *  11   18*
       *      /  \
       *   16p    19
       */
      const items = [12, 11, 18, 16, 19];
      const tree = new AvlTree<number, number>();
      for (const item of items) {
        tree.insert(item, item);
        checkTree(tree.root);
      }
      if (PRINT_TREES) {
        tree.print();
      }
      const removed = tree.delete(18);
      expect(removed).toBe(true);
      expect(tree.size).toBe(4);
      if (PRINT_TREES) {
        tree.print();
      }
      checkTree(tree.root);
    });

    it('returns false when node is not found', () => {
      const tree = new AvlTree<number, number>();
      expect(tree.delete(40)).toBe(false);
      tree.insert(40, 40);
      tree.insert(30, 30);
      expect(tree.delete(30)).toBe(true);
      expect(tree.delete(40)).toBe(true);
      expect(tree.delete(0)).toBe(false);
    });
  });

  describe('toJSON', () => {
    it('works for an empty tree', () => {
      expect(new AvlTree<number, number>().toJSON()).toBeUndefined();
    });

    it('returns a simple object tree', () => {
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(125, 125);
      tree.insert(200, 200);
      tree.insert(300, 300);
      expect(tree.toJSON()).toMatchSnapshot();
    });

    it('allows for JSON.stringify support', () => {
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(125, 125);
      tree.insert(200, 200);
      tree.insert(300, 300);
      expect(JSON.stringify(tree)).toMatchSnapshot();
    });

    it('allows for JSON.stringify support with empty trees', () => {
      const tree = new AvlTree<number, number>();
      expect(JSON.stringify(tree)).toMatchSnapshot();
    });
  });

  describe('has', () => {
    it('checks if a key exists in the tree', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(tree.has(100)).toBe(true);
      expect(tree.has(50)).toBe(true);
      expect(tree.has(150)).toBe(true);
      expect(tree.has(125)).toBe(true);
      expect(tree.has(200)).toBe(true);
      expect(tree.has(40)).toBe(true);
      expect(tree.has(99)).toBe(false);
      expect(tree.has(101)).toBe(false);
    });
  });

  describe('keys', () => {
    it('gets a sorted array of keys', () => {
      const tree = new AvlTree<number, number>();
      tree.insert(100, 100);
      tree.insert(50, 50);
      tree.insert(150, 150);
      tree.insert(125, 125);
      tree.insert(200, 200);
      tree.insert(40, 40);
      expect(Array.from(tree.keys())).toEqual([40, 50, 100, 125, 150, 200]);
    });
  });

  describe('values', () => {
    it('gets a sorted array of values', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(Array.from(tree.values())).toEqual(['40', '50', '100', '125', '150', '200']);
    });
  });

  describe('entries', () => {
    it('gets a sorted array of key-value pairs', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(Array.from(tree.entries())).toEqual([
        [40, '40'],
        [50, '50'],
        [100, '100'],
        [125, '125'],
        [150, '150'],
        [200, '200']
      ]);
    });
  });

  describe('at', () => {
    it('gets a node at an index', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(tree.at(0)?.[0]).toBe(40);
      expect(tree.at(1)?.[0]).toBe(50);
      expect(tree.at(2)?.[0]).toBe(100);
      expect(tree.at(3)?.[0]).toBe(125);
      expect(tree.at(4)?.[0]).toBe(150);
      expect(tree.at(5)?.[0]).toBe(200);
    });

    it('throws an error when out of bounds', () => {
      const tree = new AvlTree<number, string>();
      expect(() => tree.at(0)).toThrow('Index out of bounds: 0');
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      expect(() => tree.at(3)).toThrow('Index out of bounds: 3');
    });
  });

  describe('minNode', () => {
    it('gets the node with the lowest key', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(tree.minNode()?.key).toBe(40);
      expect(tree.minKey()).toBe(40);
      expect(tree.minValue()).toBe('40');
    });

    it('handles the empty tree case', () => {
      const tree = new AvlTree<number, string>();
      expect(tree.minNode()).toBeUndefined();
      expect(tree.minKey()).toBeUndefined();
      expect(tree.minValue()).toBeUndefined();
    });
  });

  describe('maxNode', () => {
    it('gets the node with the highest key', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(tree.maxNode()?.key).toBe(200);
      expect(tree.maxKey()).toBe(200);
      expect(tree.maxValue()).toBe('200');
    });

    it('handles the empty tree case', () => {
      const tree = new AvlTree<number, string>();
      expect(tree.maxNode()).toBeUndefined();
      expect(tree.maxKey()).toBeUndefined();
      expect(tree.maxValue()).toBeUndefined();
    });
  });

  describe('iterator', () => {
    it('iterates over all of the nodes', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      const results: [number, string][] = [];
      for (const entry of tree) {
        results.push(entry);
      }
      expect(results).toHaveLength(6);
      expect(results.map(([key]) => key)).toEqual([40, 50, 100, 125, 150, 200]);
    });
  });

  describe('predecessor', () => {
    it('finds the predecessor', () => {
      const tree = new AvlTree<number, string>();
      const node100 = tree.insert(100, '100') as AvlTreeNode<number, string>;
      const node50 = tree.insert(50, '50') as AvlTreeNode<number, string>;
      const node150 = tree.insert(150, '150') as AvlTreeNode<number, string>;
      const node125 = tree.insert(125, '125') as AvlTreeNode<number, string>;
      const node200 = tree.insert(200, '200') as AvlTreeNode<number, string>;
      const node40 = tree.insert(40, '40') as AvlTreeNode<number, string>;
      const node220 = tree.insert(220, '220') as AvlTreeNode<number, string>;
      const node110 = tree.insert(110, '110') as AvlTreeNode<number, string>;
      const node75 = tree.insert(75, '75') as AvlTreeNode<number, string>;
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.predecessor(node40)?.key).toBeUndefined();
      expect(tree.predecessor(node50)?.key).toBe(40);
      expect(tree.predecessor(node75)?.key).toBe(50);
      expect(tree.predecessor(node100)?.key).toBe(75);
      expect(tree.predecessor(node110)?.key).toBe(100);
      expect(tree.predecessor(node125)?.key).toBe(110);
      expect(tree.predecessor(node150)?.key).toBe(125);
      expect(tree.predecessor(node200)?.key).toBe(150);
      expect(tree.predecessor(node220)?.key).toBe(200);
    });

    it('works in big cases', () => {
      const tree = new AvlTree<number, number>();
      repeat(100, i => {
        tree.insert(i, i);
      });
      repeat(99, i => {
        const node = tree.getNode(i + 1) as AvlTreeNode<number, number>;
        expect(tree.predecessor(node)?.key).toBe(i);
      });
    });
  });

  describe('successor', () => {
    it('finds the successor', () => {
      const tree = new AvlTree<number, string>();
      const node100 = tree.insert(100, '100') as AvlTreeNode<number, string>;
      const node50 = tree.insert(50, '50') as AvlTreeNode<number, string>;
      const node150 = tree.insert(150, '150') as AvlTreeNode<number, string>;
      const node125 = tree.insert(125, '125') as AvlTreeNode<number, string>;
      const node200 = tree.insert(200, '200') as AvlTreeNode<number, string>;
      const node40 = tree.insert(40, '40') as AvlTreeNode<number, string>;
      const node220 = tree.insert(220, '220') as AvlTreeNode<number, string>;
      const node110 = tree.insert(110, '110') as AvlTreeNode<number, string>;
      const node75 = tree.insert(75, '75') as AvlTreeNode<number, string>;
      if (PRINT_TREES) {
        tree.print();
      }
      expect(tree.successor(node40)?.key).toBe(50);
      expect(tree.successor(node50)?.key).toBe(75);
      expect(tree.successor(node75)?.key).toBe(100);
      expect(tree.successor(node100)?.key).toBe(110);
      expect(tree.successor(node110)?.key).toBe(125);
      expect(tree.successor(node125)?.key).toBe(150);
      expect(tree.successor(node150)?.key).toBe(200);
      expect(tree.successor(node200)?.key).toBe(220);
      expect(tree.successor(node220)?.key).toBeUndefined();
    });

    it('works in big cases', () => {
      const tree = new AvlTree<number, number>();
      repeat(100, i => {
        tree.insert(i, i);
      });
      repeat(99, i => {
        const node = tree.getNode(i) as AvlTreeNode<number, number>;
        expect(tree.successor(node)?.key).toBe(i + 1);
      });
    });
  });

  describe('toString', () => {
    it('prints out a tree', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(100, '100');
      tree.insert(50, '50');
      tree.insert(150, '150');
      tree.insert(125, '125');
      tree.insert(200, '200');
      tree.insert(40, '40');
      expect(`${tree.toString()}`).toMatchSnapshot();
    });

    it('prints out an empty tree', () => {
      const tree = new AvlTree<number, string>();
      expect(`${tree.toString()}`).toMatchSnapshot();
    });
  });

  describe('map', () => {
    it('maps the tree nodes onto a list', () => {
      const tree = new AvlTree<number, number>();
      tree.insert(1, 1);
      tree.insert(2, 2);
      tree.insert(4, 4);
      tree.insert(8, 8);
      tree.insert(16, 16);
      expect(tree.map(([, value]) => value * value)).toEqual([1, 4, 16, 64, 256]);
    });
  });

  describe('forEach', () => {
    it('executes a function on each node', () => {
      const tree = new AvlTree<number, string>();
      tree.insert(1, '1');
      tree.insert(2, '2');
      tree.insert(4, '4');
      tree.insert(8, '8');
      tree.insert(16, '16');
      const iter = jest.fn();
      tree.forEach(iter);
      // Callback receives (value, key, tree), in ascending key order.
      expect(iter).toHaveBeenCalledTimes(5);
      expect(iter).toHaveBeenNthCalledWith(1, '1', 1, tree);
      expect(iter).toHaveBeenNthCalledWith(2, '2', 2, tree);
      expect(iter).toHaveBeenNthCalledWith(3, '4', 4, tree);
      expect(iter).toHaveBeenNthCalledWith(4, '8', 8, tree);
      expect(iter).toHaveBeenNthCalledWith(5, '16', 16, tree);
    });
  });

  it('works with random case', () => {
    // Create shuffled list
    const insertCount = 200;
    const items: number[] = [];
    for (let i = 0; i < insertCount; i += 1) {
      items.push(i);
    }
    shuffleList(items);
    const tree = new AvlTree<number, number>();
    for (const item of items) {
      tree.insert(item, item);
      checkTree(tree.root);
    }
    if (PRINT_TREES) {
      tree.print();
    }
    // Shuffle list again for removal
    shuffleList(items);
    for (const item of items) {
      // console.log(`Removing ${item}`);
      const removed = tree.delete(item);
      // console.log(`After removing ${item}:\n${tree}`);
      expect(removed).toBe(true);
      checkTree(tree.root);
    }
    expect(tree.size).toBe(0);
  });

  it.skip('is fast', () => {
    // Benchmark, not a correctness test — skipped by default.
    // Create shuffled list
    const insertCount = 400000;
    const items: number[] = [];
    for (let i = 0; i < insertCount; i += 1) {
      items.push(i);
    }
    shuffleList(items);
    // console.log(JSON.stringify(items));
    const tree = new AvlTree<number, number>();
    let start = new Date().getTime();
    for (const item of items) {
      tree.insert(item, item);
    }
    let elapsed = new Date().getTime() - start;
    console.log(`Inserted ${insertCount} items in ${elapsed} ms`);
    start = new Date().getTime();
    for (const item of items) {
      tree.get(item);
    }
    elapsed = new Date().getTime() - start;
    console.log(`Found ${insertCount} items in ${elapsed} ms`);
    start = new Date().getTime();
    for (const item of items) {
      tree.delete(item);
    }
    elapsed = new Date().getTime() - start;
    console.log(`Removed ${insertCount} items in ${elapsed} ms`);
  });
});

// In-place Fisher-Yates shuffle; returns the same array for convenience.
function shuffleList<T>(array: T[]): T[] {
  let currentIndex = array.length;

  // While there remain elements to shuffle...
  while (currentIndex > 0) {
    // Pick a remaining element...
    const randomIndex = Math.floor(Math.random() * currentIndex);
    currentIndex -= 1;

    // And swap it with the current element.
    const temporaryValue = array[currentIndex];
    array[currentIndex] = array[randomIndex];
    array[randomIndex] = temporaryValue;
  }

  return array;
}

// Calls fn(0) .. fn(times - 1) and collects the results.
function repeat<T>(times: number, fn: (iteration: number) => T): T[] {
  const results: T[] = [];
  for (let i = 0; i < times; i++) {
    results.push(fn(i));
  }
  return results;
}
Chronic obstructive pulmonary disease. Family physicians' role in management. With the release of new Canadian Chronic Obstructive Pulmonary Disease (COPD) Guidelines, I thought it would be helpful to describe family physicians' role in office management of this common disease. The guidelines can be found in their entirety at http://www.pulsus.com/Respir/10_SA/supp_A_master.pdf. The prevalence of COPD is increasing. It is currently the fifth most common cause of death; by 2020, it will rise to third.
A unified approach to numerical homogenization of flow parameters in porous media The aim of upscaling is to determine equivalent homogeneous parameters at a coarse-scale from a spatially oscillating fine-scale parameter distribution. A large variety of upscaling methods is available, among which the homogenization method plays an important role. This paper presents an extension of the classical homogenization method to nonlinear problems as they occur while upscaling parameters of incompressible, immiscible two-phase flow and while dealing with flow in near well regions. In all cases homogenization is based on fine-scale steady-state flow equations with periodic boundary conditions. These equations are solved with the finite element method. Results of numerical experiments are given.
11First, Do No Harm. Improving Safety in Catheter Ablation for Atrial Fibrillation: A Prospective Study of the Use of Ultrasound to Guide Vascular Access Introduction The most frequent complications of AF Ablation (AFA) are related to vascular access but there is little published evidence as to how these can be minimised. In 2012 the European Society of Cardiology clearly stated in their updated guidelines that improving safety of catheter ablation should be a primary goal in the further development of this therapy. Methods In this prospective study, consecutive patients undergoing AFA received either standard femoral vascular access, guided by a landmark technique (Group S), or routine ultrasound-guided vascular access (Group U). Vascular complications were assessed before hospital discharge, and by means of bespoke postal questionnaire one month later. Outcome measures were actionable (BARC2+) bleeding complications, groin pain that required analgesics, and prolonged bruising lasting >2 weeks. Results Patients in Groups S (n = 146) and U (n = 163) were well matched at baseline. Follow up questionnaires were received from 92.6% of patients. Uninterrupted warfarin therapy was used in 65% of cases. There was no difference between the groups in mean INR levels, peak ACT levels or in the use of protamine post-procedure. Procedures in Group U were significantly shorter than those in Group S (184 ± 53 min v. 167 ± 4 min, p = 0.04). Inadvertent femoral arterial puncture, as recognised by the operator, was less common in Group U (10, 6.1%) than in Group S (19,13.0%), p = 0.04. Patients in Group U were significantly less likely to have a BARC 2+ bleed (10.4% v. 19.9% p = 0.02), were less likely to suffer groin pain after discharge (27.1% v. 42.8%, p = 0.006) and were less likely to experience prolonged local bruising (21.5% v. 40.4% p = 0.001). 2 patients in Group S and 1 patient in Group U had major (BARC 3) bleeding requiring blood transfusion (p = NS). 
Multivariable logistic regression analysis revealed a significant association of vascular complications with non-ultrasound guided access (OR 3.12, 95% CI 1.54–5.34, p = 0.003) and increasing age (OR 1.05, 95% CI 1.01–1.09, p = 0.02). The number needed to treat with ultrasound to prevent one bleeding complication was 11. Discussion Previous work has shown that physician-reported complication rates following AFA miss the majority of vascular events considered important to patients. For this reason we assessed patient reported outcome measures post-discharge, and chose a broad definition of bleeding complications rather than restricting ourselves to life-threatening complications. Accordingly our complication rates seem high at first glance, but we believe they better represent the true patient experience. To our knowledge, this is the first study to show the benefit of adopting a policy of using vascular ultrasound for AFA. Conclusion Routine use of ultrasound guided vascular access for AFA is associated with a significant reduction in bleeding complications, post procedural groin pain and prolonged local bruising when compared to standard vascular access.
/**
 * \return
 *      The total number of seglets this allocator manages, counting free
 *      seglets, reserved seglets, and currently allocated ones alike.
 */
size_t
SegletAllocator::getTotalCount()
{
    // The backing block is carved into fixed-size seglets; the count is
    // simply how many whole seglets fit in it.
    const size_t segletCount = block.length / segletSize;
    return segletCount;
}
Side-Chain Engineering for Enhancing the Molecular Rigidity and Photovoltaic Performance of Noncovalently Fused-Ring Electron Acceptors. Side-chain engineering has been proven to be an effective strategy to regulate the solubility and packing behavior of organic materials. Recently, a unique strategy, so-called terminal side-chain (T-SC) engineering, has attracted much attention in the field of organic solar cells (OSCs), whereas it still lacks deep understanding of the mechanism. Herein, a new noncovalently fused-ring electron acceptor (NFREA) containing two T-SCs ( NoCA-5 ) was designed and synthesized. It is found that the introduction of T-SCs can enhance molecular rigidity and intermolecular - stacking, which is confirmed by the smaller Stokes shift value, lower reorganization free energy, and shorter - stacking distance, in comparison to NoCA-1. Hence, the NoCA-5 -based device exhibits a record power conversion efficiency (PCE) of 14.82% in labs and a certified PCE of 14.5%, resulting from a high electron mobility, a short charge-extraction time, a small Urbach energy ( E u ), and a favorable phase separation. This work provided new ideas for designing high-performance NFREAs.
<reponame>moul/cattle package io.cattle.platform.servicediscovery.dao.impl; import static io.cattle.platform.core.model.tables.InstanceLinkTable.INSTANCE_LINK; import static io.cattle.platform.core.model.tables.InstanceTable.INSTANCE; import static io.cattle.platform.core.model.tables.ServiceConsumeMapTable.SERVICE_CONSUME_MAP; import static io.cattle.platform.core.model.tables.ServiceExposeMapTable.SERVICE_EXPOSE_MAP; import static io.cattle.platform.core.model.tables.ServiceTable.SERVICE; import io.cattle.platform.core.addon.LoadBalancerServiceLink; import io.cattle.platform.core.addon.ServiceLink; import io.cattle.platform.core.constants.CommonStatesConstants; import io.cattle.platform.core.constants.LoadBalancerConstants; import io.cattle.platform.core.model.Instance; import io.cattle.platform.core.model.InstanceLink; import io.cattle.platform.core.model.Service; import io.cattle.platform.core.model.ServiceConsumeMap; import io.cattle.platform.core.model.tables.records.InstanceLinkRecord; import io.cattle.platform.core.model.tables.records.InstanceRecord; import io.cattle.platform.core.model.tables.records.ServiceConsumeMapRecord; import io.cattle.platform.db.jooq.dao.impl.AbstractJooqDao; import io.cattle.platform.json.JsonMapper; import io.cattle.platform.lock.LockCallback; import io.cattle.platform.lock.LockManager; import io.cattle.platform.object.ObjectManager; import io.cattle.platform.object.process.ObjectProcessManager; import io.cattle.platform.object.process.StandardProcess; import io.cattle.platform.object.util.DataUtils; import io.cattle.platform.servicediscovery.api.constants.ServiceDiscoveryConstants; import io.cattle.platform.servicediscovery.api.dao.ServiceConsumeMapDao; import io.cattle.platform.servicediscovery.deployment.impl.lock.ServiceLinkLock; import io.cattle.platform.util.type.CollectionUtils; import java.util.ArrayList; import java.util.List; import java.util.Map; import javax.inject.Inject; public class ServiceConsumeMapDaoImpl 
extends AbstractJooqDao implements ServiceConsumeMapDao { @Inject ObjectManager objectManager; @Inject ObjectProcessManager objectProcessManager; @Inject JsonMapper jsonMapper; @Inject LockManager lockManager; @Override public ServiceConsumeMap findMapToRemove(long serviceId, long consumedServiceId) { List<ServiceConsumeMap> maps = objectManager.find(ServiceConsumeMap.class, SERVICE_CONSUME_MAP.SERVICE_ID, serviceId, SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID, consumedServiceId); for (ServiceConsumeMap map : maps) { if (map != null && (map.getRemoved() == null || map.getState().equals(CommonStatesConstants.REMOVING))) { return map; } } return null; } @Override public ServiceConsumeMap findNonRemovedMap(long serviceId, long consumedServiceId, String linkName) { ServiceConsumeMap map = null; if (linkName == null) { map = objectManager.findOne(ServiceConsumeMap.class, SERVICE_CONSUME_MAP.SERVICE_ID, serviceId, SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID, consumedServiceId, SERVICE_CONSUME_MAP.REMOVED, null); } else { map = objectManager.findOne(ServiceConsumeMap.class, SERVICE_CONSUME_MAP.SERVICE_ID, serviceId, SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID, consumedServiceId, SERVICE_CONSUME_MAP.NAME, linkName, SERVICE_CONSUME_MAP.REMOVED, null); } if (map != null && !map.getState().equalsIgnoreCase(CommonStatesConstants.REMOVING)) { return map; } return null; } @Override public List<? extends ServiceConsumeMap> findConsumedServices(long serviceId) { return create() .selectFrom(SERVICE_CONSUME_MAP) .where( SERVICE_CONSUME_MAP.SERVICE_ID.eq(serviceId) .and(SERVICE_CONSUME_MAP.REMOVED.isNull())).fetchInto(ServiceConsumeMapRecord.class); } @Override public List<? 
extends ServiceConsumeMap> findConsumedServicesForInstance(long instanceId, String kind) { return create() .select(SERVICE_CONSUME_MAP.fields()) .from(SERVICE_CONSUME_MAP) .join(SERVICE_EXPOSE_MAP) .on(SERVICE_EXPOSE_MAP.SERVICE_ID.eq(SERVICE_CONSUME_MAP.SERVICE_ID)) .join(SERVICE) .on(SERVICE.ID.eq(SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID)) .where( SERVICE_EXPOSE_MAP.INSTANCE_ID.eq(instanceId) .and(SERVICE.KIND.eq(kind)) .and(SERVICE_CONSUME_MAP.REMOVED.isNull()) //Don't include yourself .and(SERVICE_CONSUME_MAP.SERVICE_ID.ne(SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID)) .and(SERVICE_EXPOSE_MAP.REMOVED.isNull())) .fetchInto(ServiceConsumeMapRecord.class); } @Override public List<? extends InstanceLink> findServiceBasedInstanceLinks(long instanceId) { return create() .select(INSTANCE_LINK.fields()) .from(INSTANCE_LINK) .where(INSTANCE_LINK.INSTANCE_ID.eq(instanceId) .and(INSTANCE_LINK.SERVICE_CONSUME_MAP_ID.isNotNull()) .and(INSTANCE_LINK.REMOVED.isNull())) .fetchInto(InstanceLinkRecord.class); } @Override public Instance findOneInstanceForService(long serviceId) { Instance last = null; List<? 
extends Instance> instances = create() .select(INSTANCE.fields()) .from(INSTANCE) .join(SERVICE_EXPOSE_MAP) .on(SERVICE_EXPOSE_MAP.INSTANCE_ID.eq(INSTANCE.ID)) .where(INSTANCE.REMOVED.isNull() .and(SERVICE_EXPOSE_MAP.SERVICE_ID.eq(serviceId)) .and(SERVICE_EXPOSE_MAP.REMOVED.isNull())) .orderBy(INSTANCE.CREATED.desc()) .fetchInto(InstanceRecord.class); for (Instance instance : instances) { last = instance; if (last.getFirstRunning() != null) { return last; } } return last; } @Override public List<String> findInstanceNamesForService(long serviceId) { return create() .select(INSTANCE.NAME) .from(INSTANCE) .join(SERVICE_EXPOSE_MAP) .on(SERVICE_EXPOSE_MAP.INSTANCE_ID.eq(INSTANCE.ID)) .where(INSTANCE.REMOVED.isNull() .and(SERVICE_EXPOSE_MAP.REMOVED.isNull()) .and(SERVICE_EXPOSE_MAP.SERVICE_ID.eq(serviceId))) .orderBy(INSTANCE.NAME.asc()) .fetch(INSTANCE.NAME); } @Override public ServiceConsumeMap createServiceLink(final Service service, final ServiceLink serviceLink) { return lockManager.lock(new ServiceLinkLock(service.getId(), serviceLink.getServiceId()), new LockCallback<ServiceConsumeMap>() { @Override public ServiceConsumeMap doWithLock() { return createServiceLinkImpl(service, serviceLink); } }); } protected ServiceConsumeMap createServiceLinkImpl(Service service, ServiceLink serviceLink) { ServiceConsumeMap map = findNonRemovedMap(service.getId(), serviceLink.getServiceId(), serviceLink.getName()); boolean update = false; if (map == null) { Map<Object,Object> properties = CollectionUtils.asMap( (Object)SERVICE_CONSUME_MAP.SERVICE_ID, service.getId(), SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID, serviceLink.getServiceId(), SERVICE_CONSUME_MAP.ACCOUNT_ID, service.getAccountId(), SERVICE_CONSUME_MAP.NAME, serviceLink.getName()); if (serviceLink instanceof LoadBalancerServiceLink) { properties.put(LoadBalancerConstants.FIELD_LB_TARGET_PORTS, ((LoadBalancerServiceLink) serviceLink).getPorts()); } map = objectManager.create(ServiceConsumeMap.class, 
objectManager.convertToPropertiesFor(ServiceConsumeMap.class, properties)); } else { if (service.getKind() .equalsIgnoreCase(ServiceDiscoveryConstants.KIND.LOADBALANCERSERVICE.name())) { LoadBalancerServiceLink newLbServiceLink = (LoadBalancerServiceLink) serviceLink; List<? extends String> newPorts = newLbServiceLink.getPorts() != null ? newLbServiceLink.getPorts() : new ArrayList<String>(); DataUtils.getWritableFields(map).put(LoadBalancerConstants.FIELD_LB_TARGET_PORTS, newPorts); objectManager.persist(map); update = true; } } if (map.getState().equalsIgnoreCase(CommonStatesConstants.REQUESTED)) { objectProcessManager.scheduleProcessInstance(ServiceDiscoveryConstants.PROCESS_SERVICE_CONSUME_MAP_CREATE, map, null); } if (update) { objectProcessManager.scheduleStandardProcess(StandardProcess.UPDATE, map, null); } return map; } @Override public List<ServiceConsumeMap> createServiceLinks(List<ServiceLink> serviceLinks) { List<ServiceConsumeMap> result = new ArrayList<>(); for (ServiceLink serviceLink : serviceLinks) { Service service = objectManager.loadResource(Service.class, serviceLink.getConsumingServiceId()); if (service == null) { continue; } result.add(createServiceLink(service, serviceLink)); } return result; } @Override public List<? extends ServiceConsumeMap> findConsumedMapsToRemove(long serviceId) { return create() .selectFrom(SERVICE_CONSUME_MAP) .where( SERVICE_CONSUME_MAP.SERVICE_ID.eq(serviceId) .and((SERVICE_CONSUME_MAP.REMOVED.isNull(). or(SERVICE_CONSUME_MAP.STATE.eq(CommonStatesConstants.REMOVING))))). fetchInto(ServiceConsumeMapRecord.class); } @Override public List<? extends ServiceConsumeMap> findConsumingServices(long serviceId) { return create() .selectFrom(SERVICE_CONSUME_MAP) .where( SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID.eq(serviceId) .and(SERVICE_CONSUME_MAP.REMOVED.isNull())).fetchInto(ServiceConsumeMapRecord.class); } @Override public List<? 
extends ServiceConsumeMap> findConsumingMapsToRemove(long serviceId) { return create() .selectFrom(SERVICE_CONSUME_MAP) .where( SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID.eq(serviceId) .and((SERVICE_CONSUME_MAP.REMOVED.isNull(). or(SERVICE_CONSUME_MAP.STATE.eq(CommonStatesConstants.REMOVING))))). fetchInto(ServiceConsumeMapRecord.class); } @Override public List<? extends Service> findLinkedServices(long serviceId) { return create() .select(SERVICE.fields()) .from(SERVICE) .join(SERVICE_CONSUME_MAP) .on(SERVICE_CONSUME_MAP.CONSUMED_SERVICE_ID.eq(SERVICE.ID)) .where( SERVICE_CONSUME_MAP.SERVICE_ID.eq(serviceId) .and(SERVICE_CONSUME_MAP.REMOVED.isNull())).fetchInto(Service.class); } }
Evaluation of a prototype thermal wave imaging system for nondestructive evaluation of composite and aluminum aerospace structures Several issues must be resolved before confidence in infrared thermography nondestructive evaluation (NDE) approaches that of more established NDE methodologies. Foremost among these are system sensitivity, repeatability, and interpretation of results. In recent years, synchronous imaging techniques have been demonstrated using a computer to control the thermal excitation, infrared image acquisition, and image processing. Advantages include simplified system setup and operation, repeatable measurements, and signal-to-noise ratio enhancement. In late 1991, LASC began a beta-site evaluation of a prototype Thermal Wave Imaging (TWI) system developed by the Institute for Manufacturing Research at Wayne State Univ. (Detroit, MI). This prototype is the forerunner of a production version currently under development for commercial offering as a fully integrated thermal NDE system. Applications will include quality assurance of manufactured aerospace composite structures and inspection for first and second-layer corrosion in aluminum (aging) aircraft structures. This paper describes the prototype system, and discusses results of specific experiments selected to demonstrate present capabilities of the system.
Effective Contraceptive Use Following Unplanned Pregnancy Among Ugandan Women Living with HIV Abstract Background Prevention of unplanned pregnancy is critical for women living with HIV (WLWH) to safely achieve their reproductive goals, and forms the second prong of the Global Plan to eliminate perinatal transmission of HIV. Family planning services need to identify women at risk of unplanned pregnancy to be effective. This study examines the relationship between unplanned pregnancy and subsequent use of effective contraception among WLWH in Uganda. Methods This is a retrospective analysis of data from the Uganda AIDS Rural Treatment Outcomes study, which was a longitudinal cohort of individuals initiating antiretroviral therapy. Women with incident pregnancies between 2011 and 2013 who reported on intent of the pregnancy were included in this analysis. The exposure of interest was referent pregnancy intent, using questions derived from the CDC PRAMS instrument. The primary outcome was self-report of effective contraceptive use 9–15 months post-partum (hormonal methods, intrauterine device, sterilization, or consistent condom use). Results Among 455 women who enrolled with a baseline median age of 29 years, CD4 count 403 cells/mm3, and living with HIV for 3.8 years, there were 110 incident pregnancies with reported intent. Of these pregnancies, 50 (45%) were reported as unplanned, and 60 (55%) as planned. Postpartum, 51% of women with unplanned and 44% with planned pregnancy reported effective contraception (P = 0.52). In models adjusted for pregnancy intent, only partner pregnancy desire was significantly associated with contraceptive use, with aRR 0.37 (95% CI 0.18–0.76, P = 0.01) for effective contraceptive use when the participant reported that her primary partner definitely or probably wants her to have a child compared with never discussed or don't know. Conclusion Almost half of incident pregnancies among WLWH in this cohort were unplanned.
Unplanned pregnancy was not associated with effective contraceptive use post-partum. These results demonstrate continued unmet need for family planning services in this population. Creative strategies to support the planning of families among women living with HIV are needed. Engaging men is likely to be a critical approach. Disclosures J. E. Haberer, Merck: Consultant, Consulting fee; Natera: Shareholder, Stock ownership
package com.liugh.dynamic; import java.io.FileInputStream; import java.io.InputStream; import java.util.Properties; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.SpringApplication; import org.springframework.boot.env.EnvironmentPostProcessor; import org.springframework.context.annotation.PropertySource; import org.springframework.context.annotation.PropertySources; import org.springframework.core.env.ConfigurableEnvironment; import org.springframework.core.env.PropertiesPropertySource; import org.springframework.stereotype.Component; @Component public class MyEnvironmentPostProcessor implements EnvironmentPostProcessor{ //可灵活的加入自己的逻辑来配置或集中化管理配置 @Override public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) { try(InputStream is = new FileInputStream("src/main/resources/app.properties")){ Properties source = new Properties(); source.load(is); PropertiesPropertySource propertySource = new PropertiesPropertySource("my",source); environment.getPropertySources().addLast(propertySource); is.close(); }catch(Exception e){ e.printStackTrace(); } } }
import os
from pprint import pprint


def parse(lines):
    """Parse a tower-defense level input file into a dict.

    Expected line layout (one logical record per line):
      0:            world bounds "WX WY"
      1:            initial position "X Y"
      2:            alternating command/step tokens, e.g. "up 3 down 4"
      3:            "HEALTH SPEED"
      4:            number of spawn times N
      5..4+N:       one spawn time per line
      5+N:          "DAMAGE RANGE"
      6+N:          number of tower queries T
      7+N..6+N+T:   one tower position "X Y" per line
    """
    # World bounds.
    bounds = lines[0].split()
    wx = int(bounds[0])
    # BUG FIX: the original read token 0 twice (copy-paste), so wy was always
    # equal to wx. Use the second token when present; fall back to a square
    # world for single-token lines (backward compatible).
    wy = int(bounds[1]) if len(bounds) > 1 else int(bounds[0])

    # Initial position.
    x = int(lines[1].split()[0])
    y = int(lines[1].split()[1])

    # Command / step pairs: consume tokens two at a time.
    cmds = []
    it = iter(lines[2].split())
    for e in it:
        cmds.append((e, int(next(it))))

    # Health and speed.
    health = float(lines[3].split()[0])
    speed = float(lines[3].split()[1])

    # Spawn times.
    nspawn = int(lines[4])
    spawns = []
    for i in range(nspawn):
        spawns.append(int(lines[4 + i + 1]))

    # Damage and range.
    damage = float(lines[4 + nspawn + 1].split()[0])
    towerrange = int(lines[4 + nspawn + 1].split()[1])

    # Tower queries.
    t = int(lines[4 + nspawn + 2])
    towers = []
    for i in range(t):
        towertxt = lines[4 + nspawn + 3 + i]
        towerx = int(towertxt.split()[0])
        towery = int(towertxt.split()[1])
        towers.append((towerx, towery))

    return {
        "wx": wx,
        "wy": wy,
        "x": x,
        "y": y,
        "cmds": cmds,
        "speed": speed,
        "health": health,
        "damage": damage,
        "range": towerrange,
        "spawns": spawns,
        "towers": towers,
    }


if __name__ == "__main__":
    # Project-local import kept under the guard so this module can be imported
    # (e.g. to reuse parse()) without requiring main.py to be present.
    from main import main

    level, quests = 4, 5
    for i in range(1, quests + 1):
        input_file = r'..\data\level{0}\level{0}_{1}.in'.format(level, i)
        output_file = os.path.splitext(input_file)[0] + ".out"
        with open(input_file, 'r') as fi:
            data = parse(fi.readlines())
        # pprint(data)
        print("=== Output {}".format(i))
        print("======================")
        result = main(data)
        pprint(result)
        with open(output_file, 'w+') as fo:
            fo.write(result)
Loot boxes, which burst open to reveal randomized rewards in games, don't exist because they're good for game design. They exist because the industry wondered: how do we charge each player the maximum amount they're willing to spend for as long as we can keep them spending? The answer already existed in a model proven successful decades ago by baseball and Magic: The Gathering cards. In his 2013 book, Uncertainty in Games, Greg Costikyan describes the success of Magic's card packs: "...When you purchase and open a booster pack, you are always uncertain what you will obtain—and may experience delight at finding a new card that works well with others you have, or disappointment at receiving cards that duplicate ones you already have, or worse, quintuplicate them—meaning you already have the maximum of this card you can use in a single deck. This is, of course, one reason Magic's business model is so effective: there's always a temptation to buy more cards, and players can be induced, in essence, to spend the maximum amount they are comfortable spending on their game, whether that be a few dollars or a few thousand." Like Magic packs, loot boxes turn the experience of getting stuff, rather than the stuff itself, into what's for sale, and encourage us to keep chasing the delight of getting what we want. They 'work' because they offer an uncertain outcome, and uncertainty is a component of good games, whether it results in a botched saving throw in D&D or a lucky bounce in Rocket League. A box which may or may not contain something rare is not sinful on its own—it's fun. It's adding money to the mix that's the problem. I appreciate that Rocket League, CS:GO, Rainbow Six Siege, Overwatch, and other games only offer cosmetic items in loot boxes, and Overwatch in particular is fairly inoffensive as you can work toward skins without purchasing anything but the game. 
The way Star Wars Battlefront 2 implemented loot boxes, however, shows that the biggest companies are testing the waters: how much can we put in these things? An entire multiplayer shooter's library of upgrades? They tried. When a progression system is wrapped up in loot boxes which can be purchased with real money, it isn't a fun progression system, practically by definition. If you've made something players can pay to skip, then you've made something worth paying to skip. With Battlefront 2's premium currency temporarily removed, this is hilariously obvious. There is currently no reason for Credits, the non-purchasable currency, to exist, as their only purpose is to abstract achievement so that it can be spent like the premium money, turning 'achievement' into 'grind,' a paycheck rather than a trophy. Not fun. Loot boxes are surely also why generic upgrades can't be applied to multiple classes, and why there's an overcomplicated crafting system—there had to be something to buy even after 20 hours of play. Also not fun. Bad game design which transparently exists to encourage spending is frustrating, especially in a game that already costs $60. What may be worse, though, is that by pairing cash and games of chance, EA and other big publishers are endangering every developer by inviting the scrutiny of politicians. Are loot boxes gambling? Buying loot boxes, like gambling in a casino, can potentially be addictive. Buying loot boxes, like gambling in a casino, can potentially be addictive. "We know that the dopamine system, which is targeted by drugs of abuse, is also very interested in unpredictable rewards," said Dr. Luke Clark, director at the Center for Gambling Research at the University of British Columbia, in a recent interview with PC Gamer . "Dopamine cells are most active when there is maximum uncertainty, and the dopamine system responds more to an uncertain reward than the same reward delivered on a predictable basis." 
Yet loot boxes are not legally considered gambling in the US and elsewhere, at least according to precedent. A series of 1996 lawsuits brought against baseball card manufacturers under the federal Racketeer Influenced and Corrupt Organizations Act claimed that limited-run "chase cards"—rare, valuable cards that might appear in a pack—constitute an illegal lottery. The suits were not successful. A similar suit against Nintendo in 1999, which claimed that Pokemon cards constituted gambling, was also dismissed. Last year, The Washington State Gambling Commission ordered Valve to "take whatever actions are necessary" to put an end to third-party CS:GO skin gambling sites, where players could bet valuable gun skins on the outcomes of esports matches, among other things. The Gambling Commission did not, however, take aim at the practice of delivering skins randomly. It is seemingly legal to sell boxes—physical or digital—with unknown contents, some more valuable to collectors than others. It's a practice familiar to toy collectors, sometimes called 'blind boxes.' Above: Hell. It's tempting to read recent anti-loot box statements from politicians as a win, but legislators getting involved in game design is uncomfortable. What's the legal difference between loot boxes and roulette? Mainly, it's that in a casino I put down money hoping for it to return to me, whereas when I buy a key for a Rocket League crate I know the money is spent—the gamble is whether or not I'll be satisfied with my purchase. That is an important distinction. However, if the contents of a loot box can be sold for a cash profit, which most can be through sanctioned marketplaces or EULA-defying grey markets, the distinction blurs. Still, unlike gambling, your possible reward is never zero, and the in-game items can't be turned in to the publisher like gambling chips for cash. Their value entirely depends on the value collectors assign them. So, it's different, but is it different enough? 
While the 1996 lawsuits against baseball card manufacturers alleged that it was not different enough and failed, that doesn't mean legislators will never successfully amend the law. It's unlikely to change, but it's still up for debate. Ebay's policy , for instance, plays it safe by requiring the contents of 'grab bags' to be listed in order to avoid sales which might constitute illegal lotteries in some states. In reality, though, I was easily able to find multiple listings for 'surprise boxes.' Whether they are or aren't lotteries by law is unclear. Do we want them to be? Whether they are or aren't lotteries by law is unclear. Do we want them to be? It's tempting to read recent anti-loot box statements from politicians as a win—we don't like loot boxes, and they're saying they'll get rid of them—but legislators getting involved in game design is concerning. A ban on charging for uncertain rewards would end Hearthstone, Magic: The Gathering, and all 'blind boxes' and 'grab bags' outright—you would not be able to buy anything without knowing its exact contents, or perhaps at least their value—and lawmakers wouldn't necessarily stop there. It could be just the in they need to form government-run ratings boards for games, which I oppose completely. It's not far fetched. In 2005, US Senators Hillary Clinton, Joe Lieberman, Tim Johnson, and Evan Bayh sponsored the Family Entertainment Protection Act, which would have put the ESRB under federal observation and fined stores which sold Mature games to kids under 17. In 2012, Donald Trump tweeted that videogame violence "must be stopped." Nothing has come of these intentions to regulate the sale of games, but if certain game systems were deemed gambling, you can be sure that 'the danger to our kids' would become a standard talking point again. Meanwhile, mobile games haven't needed the element of chance to succeed in selling premium currencies. 
The legality of Clash of Clans-like schemes (premium currencies that directly translate to boosts and bonuses and power) isn't in dispute. So, if loot boxes were declared illegal, we'd get a small victory in pushing game publishers away from design we don't like, but not necessarily toward design we do, at the expense of increasing government scrutiny which could harm small developers who have no part in this. As much as I want to stick it to corporations, a legal solution is worrisome. And given the precedent, it's also unlikely to succeed. We're talking about defining Magic: The Gathering and baseball cards as illegal racketeering, an accusation they've weathered successfully for years. Above: Hell. What to do They botched one of their biggest launches of the year, ate a bunch of negative press, and could've avoided it all. The dopamine rush described by Dr. Clark is real, and its easy to see how loot boxes could get children and people who are prone to addiction to overspend. For that and many other reasons, I'd love to get rid of them, if not by forcing indie game developers to submit their games to their state's gambling control board for inspection. Frustratingly, though, I doubt the catalyst for change will be reduced profits. The truth is that loot boxes are fun to open. I've purchased keys for Rocket League crates—because I must have the coolest car—and spending $10 here and there hasn't left me with regrets. Many probably feel the same way, so I'm doubtful that 'vote with your wallet' is going to force meaningful change. When they're relatively inoffensive, people are going to keep buying loot boxes, and blaming individual players pointlessly sets us against each other, instead of the people actually responsible: exorbitantly-paid executives and board members. All I can recommend for now is that we keep calling out obnoxious implementations of loot boxes. 
We may not like what we get when Battlefront 2's premium currency returns, but that EA removed it the day before launch shows that player criticism had a significant effect. They botched one of their biggest launches of the year, ate a bunch of negative press, and could've avoided it all. Whether they end up making money on Battlefront 2 anyway, or losing money, they may think twice about the nature of their in-game purchases next time. Inside the industry, I don't expect any individual to risk their job by publicly criticizing their bosses—we recently spoke to insiders about loot boxes , and they all asked to remain anonymous—though I can't imagine the average game developer employee loves designing simulated slot machines. On that note, there's a lot of work to do on the industry that, while seemingly unrelated, would help. Namely, an end to reliance on temporary contractors, crunch, and high turnover, and reasonable profit expectations that don't require every game to pull in half-a-billion dollars per year in microtransactions . Above: What buying currency in Battlefront 2 looked like, before it was removed. I do think it's understandable that publishers want to earn revenue from existing owners if they're providing a service. Servers cost money. But it feels pretty obvious that they've slowly been working toward something they knew we didn't want, hoping that if they turned up the heat gradually—first pre-order bonuses, then microtransactions, 'games as a service,' and finally cribbing the MTG model—we wouldn't notice that the system is designed to encourage overspending on items. Of course we noticed, and so have legislators, reigniting the 'gambling for children' collectable card game debates from the '90s. 
Collectible card games managed to slip away from the controversy, but now that it's back, the games industry has to reckon with the ethics of how it applies game systems to monetization, as well as the way it produces games and the profits they're expected to make. If they don't back off, at least a little—say, by only putting cosmetic items in boxes and always providing an alternative way to get them—someone else might make a decision for them.
Design and Analysis of Enhanced Grouping DCF Scheme for the MAC Layer Enhancement of 802.11n with Ultra-high Data Rate The 802.11 has emerged as the prominent wireless LAN technology as the mobile computing devices such as notebooks and PDA have replaced the desktop computers to be the main trend products. However, if the number of active stations is large, that is, a high-loading condition for the legacy DCF of 802.11, the capacity will be very low due to high collision costs. In this paper, <i>we</i> <i>introduce</i> <i>the</i> <i>TDMA</i> <i>concept</i> <i>to</i> <i>partition</i> <i>all</i> <i>numerous</i> <i>active</i> <i>stations</i> <i>into</i> <i>several</i> <i>groups</i> <i>to</i> <i>avoid</i> <i>all</i> <i>stations</i> <i>transmitting</i> <i>the</i> <i>frames</i> <i>simultaneously</i>. When Point Coordinator (PC, generally referring to AP) finds that the number of active stations (<i>M</i>) is large i.e. bigger than 8, it broadcasts number of groups (<i>N</i>), group head (<i>Nh</i>) bits and start grouping bit sequence (<i>k</i>) (such as 00000100 00000000 00000000) information in the TIM field of the beacon frame. Once all stations receive this instruction, the stations whose last two LSB bits (because <i>k</i>=0, <i>N</i>=4) of the MAC address (IEEE EUI-48 or EUI-64) are 00, belonging to group 0, will transfer their frames first. On the contrary, all stations belonging to other groups will set their waiting time, that is, Network Allocation Vector (NAV), much more precisely. In this article, we also proposed a fast selection scheme to get the optimal start grouping bit sequence which aims to partition all the active stations into a few groups more uniformly to reduce the number of members in each group so that we can reduce the Contention Window minimum (<i>CWmin</i>), that is, backoff's idle overhead.
Transient Response of Synchronous Generator to Faults on the Evacuation Overhead Line Normal operating conditions of synchronous generators are often perturbed by faults occurring on the evacuation overhead power line. Two events occurring on the evacuation overhead power line of Rul Mare Retezat Hydroelectric Power Plant are presented in the paper. It is about a two-phase short circuit and atmospheric discharges respectively. The monitoring was done using a system type Digital Modular Perturbograph (PDM). The first part of the paper is an overview of PDM devices and their characteristics that are described and afterwards the monitoring systems and practical use of PDM solutions are presented. Real cases of monitoring voltages and currents waveforms, their rms values and corresponding angles, are presented in the paper.
/**************************************************************
 *
 * fifoExternGate - Synchronize on External Gate (Clock)
 *
 * Queues a CL_EXT_CLOCK command into the FIFO so that sequence
 * execution waits on 'count' external gate (clock) events.
 *
 * RETURNS:
 *   0 - OK
 *   HDWAREERROR+INVALIDACODE - fifo object undefined (NULL)
 */
int fifoExternGate(FIFO_ID pFifoId,int count)
{
   if (pFifoId == NULL)
      return(HDWAREERROR+INVALIDACODE);

   /* Older FIFO board versions (< 1003) need the count adjusted by 3 —
      presumably to compensate for a firmware counting offset; confirm
      against the board documentation. */
   if (pFifoId->fifoBrdVersion < 1003)
      count += 3;

   fifoStuffCmd(pFifoId,CL_EXT_CLOCK,(ulong_t) count);
   return(0);
}
#include <iostream>
#include <algorithm>
using namespace std;

int n;
int m;

// Ordering predicate: group values by remainder mod m (ascending remainder).
// Within one remainder class, odd values precede even values; odd values are
// sorted in descending order, even values in ascending order.
bool cmp (int lhs, int rhs) {
    const int lhsOdd = abs(lhs % 2);
    const int rhsOdd = abs(rhs % 2);
    if (lhs % m != rhs % m) {
        return lhs % m < rhs % m;
    }
    if (lhsOdd != rhsOdd) {
        return lhsOdd > rhsOdd;   // odd (1) before even (0)
    }
    return lhsOdd ? lhs > rhs     // both odd: descending
                  : lhs < rhs;    // both even: ascending
}

int main() {
    int values[10005];
    // Process data sets until the terminating "0 0" pair; the input line
    // "n m" is echoed before the sorted sequence, per the problem format.
    while (cin >> n >> m) {
        cout << n << " " << m << endl;
        if (n == 0) {
            break;
        }
        for (int idx = 0; idx < n; idx++) {
            cin >> values[idx];
        }
        sort(values, values + n, cmp);
        for (int idx = 0; idx < n; idx++) {
            cout << values[idx] << endl;
        }
    }
    return 0;
}
package ruina.monsters.blackSilence.blackSilence4.memories.purple;

import basemod.AutoAdd;
import com.megacrit.cardcrawl.characters.AbstractPlayer;
import com.megacrit.cardcrawl.monsters.AbstractMonster;
import ruina.monsters.blackSilence.blackSilence4.BlackSilence4;

import static ruina.RuinaMod.makeID;

/**
 * Second variant of the Purple memory card used by the BlackSilence4 fight.
 * It reuses the base Purple behavior, swapping in the second extended
 * description and summoning via the parent monster when chosen.
 */
@AutoAdd.Ignore // excluded from BaseMod's automatic card registration; constructed explicitly with a parent
public class Purple2 extends Purple {
    public final static String ID = makeID(Purple2.class.getSimpleName());

    public Purple2(BlackSilence4 parent) {
        super(parent);
        // Override the inherited text with the second EXTENDED_DESCRIPTION entry.
        this.rawDescription = cardStrings.EXTENDED_DESCRIPTION[1];
        this.initializeDescription();
    }

    @Override
    public void use(AbstractPlayer p, AbstractMonster m) {
        // Intentionally empty: this card's effect fires from onChoseThisOption, not on play.
    }

    @Override
    public void upp() {
        // Intentionally empty: no upgrade behavior for this variant.
    }

    // Invoked when the player selects this option; delegates to the parent boss's summon.
    public void onChoseThisOption(){
        parent.Summon();
    }
}
The Performance Of Employees Of Banks With Work Motivation as Moderator This study aims to influence the effectiveness of accounting information systems and work motivation on employee performance. Furthermore, work motivation is a moderator between the effectiveness of accounting information systems and employee performance. This research was conducted at the Rural Bank (BPR) in Gianyar Regency. Retrieval of samples with Purposive Sampling technique. Technical analysis of the data used is using the approach Partial Least Square (PLS). Results of the study show that the effectiveness of the accounting information system and motivation to work affect the performance of employees. Furthermore, work motivation can moderate the effect of the effectiveness of accounting information systems on employee performance.
#include "Globals.h"
#include "Core/Application/Application.h"

#include <memory>

// ----------------------- Graphics Card Usage --------------------------------------------------------
// Exported symbols that hint hybrid-GPU drivers to run this process on the discrete GPU.
extern "C"
{
	// http://developer.download.nvidia.com/devzone/devcenter/gamegraphics/files/OptimusRenderingPolicies.pdf
	__declspec(dllexport) DWORD NvOptimusEnablement = 0x00000001;

	// http://developer.amd.com/community/blog/2015/10/02/amd-enduro-system-for-developers/
	// or (if the 1st doesn't work): https://gpuopen.com/amdpowerxpressrequesthighperformance/ or https://community.amd.com/thread/169965
	__declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
}

// ----------------------- Application ----------------------------------------------------------------
// Entry point: builds the Application, runs its update loop, and tears it down.
int main(int argc, char** argv)
{
	// -- Initialization --
	// unique_ptr replaces the original raw new/delete pair so the Application
	// is destroyed even if Update() exits via an exception.
	auto application = std::make_unique<Application>(APPLICATION_NAME, WINDOW_WIDTH, WINDOW_HEIGHT, FRAMERATE);

	// -- App Update -- (blocks until the application decides to quit)
	application->Update();

	// -- App Delete --
	ENGINE_LOG("--- Closing Application ---")
	application.reset();	// explicit destruction here, matching the original delete point

	return 0;
}
VMware has taken a significant step towards a converged infrastructure ethos by announcing EVO:RAIL at VMworld in San Francisco. The announcement was made by CEO Pat Gelsinger, who said during the keynote that EVO:RAIL would offer a ‘dramatic simplification’ for those seeking a software-defined enterprise. “EVO:RAIL is an appliance solution for midrange needs that offers dramatic simplification from power on to VMs in 15 minutes or less […] roll it up, take it out-of-the-box [and in] 15 minutes – you're VM operational,” Gelsinger said. The solution will combine both software and hardware to effectively create an out-of-the-box SDDC, however Gelsinger wanted to make it clear that VMware was not stepping into the world of hardware. “This is not a VMware product. We enable our OEMs to deliver. We're not doing hardware, our OEM partners deliver this as a solution,” he said. Partners delivering the solution consist of Dell, EMC, Fujitsu, Inspur, NetOne and Supermicro. The Palo Alto, California-based virtualisation company also launched EVO:RACK. “It’s the [same] power and the same concepts of EVO but done at cloud scale. Where we can scale out an entire data centre; a cloud SDDC environment in two hours or less,” explained Gelsinger. As well as discussing the rebranding of vCloud Air, a partnership with Open Compute Project (OCP) and OpenStack integration, Gelsinger explained his vision for the future of IT. Bravery was the overarching theme, as the chief exec explained all decision makers in this ‘brave new world of IT’ needed courage. “It’s important for everyone in this room and everyone in the community to understand that I will hold VMware accountable to be brave, to push forward and disrupt for the benefit of our customers; and in the turn, you can count on us to lead the way in this generation of brave new IT,” he said. To read the abridged transcript, click here.
The nearly invisible intraneural cyst: a new and emerging part of the spectrum While intraneural ganglion cysts can be associated with any joint, peroneal cysts associated with the superior tibiofibular joint (STFJ) are the most common.7 Intraneural cysts form from synovial joints, via a capsular defect, as synovial fluid dissects along the articular branch toward the parent nerve.1,3,911,1417,20 The extent and dimensions of intraneural cysts are determined by the path of least resistance, intraarticular pressure, and pressure fluxes. In some cases, pressure can drive extreme longitudinal propagation.18 For peroneal intraneural cysts, these principles may lead to phasic propagation, including primary ascent of cyst fluid up the articular branch and common peroneal nerve, cross-over within the sciatic nerve, and terminal branch descent down the tibial nerve.13 In contrast, we have evaluated several patients with peroneal neuropathy and negative MRI findings who, on subsequent closer inspection, have had subtle evidence of an intraneural ganglion cyst. We present a series of cases that highlight the dynamic nature of peroneal intraneural ganglion cysts and describe the nearly invisible cyst as a new and emerging part of the spectrum. Patient Cohort Cases in which nearly invisible peroneal intraneural ganglion cysts were found were retrospectively reviewed and are presented. Case history and physical examination, imaging, and intraoperative findings were reviewed for each case. This study was approved by the Institutional Review Board of the Mayo Clinic. Variables of Interest Data abstracted included neurological examination, electrodiagnostic, and imaging findings; available ultrasound images, MR images, and MR arthrograms were reviewed for abnormalities of the common peroneal nerve and its branches as well as abnormalities of the STFJ. 
Outcomes of Interest The outcomes of interest were the size and configuration of peroneal intraneural ganglion cysts over time, relative to various interventions that were performed, and in relation to physical examination and electrodiagnostic findings. Case 1: Sequential MR Images Demonstrate a Shrinking Cyst Approximately 3 months prior to our evaluation, a 41-year-old man developed the acute onset of left lateral knee pain and a partial foot drop while performing squatting exercises that progressed to a complete foot drop over several months. Electromyography (EMG) revealed a deep predominant peroneal neuropathy superimposed on an L-5 radiculopathy. MRI showed an extreme peroneal intraneural ganglion cyst, which arose from the STFJ and extended to the sciatic nerve bifurcation ( Fig. 1A and B). Because MRI did not capture the full extent of the cyst, 19 days later repeat MRI with a larger field was performed. The repeat MRI study showed a substantially smaller peroneal intraneural ganglion cyst ( Fig. 1C and D). At the time of our evaluation, the patient had absent tibialis anterior muscle function with normal peroneus muscle function, suggesting a deep peroneal predominant neuropathy. At surgery, the articular branch and the joint surfaces of the STFJ were resected. The common peroneal nerve and the articular branch were clearly cystic (Fig. 1E). Histopathological examination of the articular branch was consistent with an intraneural ganglion cyst (Fig. 1F). At the 6-month follow-up, the patient had Medical Research Council Grade 4-/5 dorsiflexion and normal eversion. At the most recent follow-up, 2.5 years after the operation, he had only trace weakness in his tibialis anterior muscle, did not require an ankle-foot orthosis, and continued to have decreased sensation in the first dorsal web space. Case 2: Ultrasonography Followed by MRI Demonstrates a Shrinking Cyst A 33-year-old woman developed an acute, painless foot drop with no clear inciting event. 
EMG findings were consistent with a severe common peroneal neuropathy. An ultrasound image of the common peroneal nerve was then obtained, which revealed an intraneural cyst ( Fig. 2A). On our initial examination approximately 3 months after the onset of the patient's symptoms, the patient had only trace activation of the tibialis anterior and peroneus muscles. MRI and MR arthrography were performed, which re- vealed subtle T2 signal in the peroneal nerve and gadolinium within the anterior portion of the peroneal nerve following intraarticular injection, consistent with a possible small intraneural cyst ( Fig. 2B-D); this was in marked contrast to the definite intraneural cyst that was observed on ultrasonography. At surgery, the common peroneal nerve was neurolyzed at the fibular neck, and a cysticappearing articular branch was resected. The STFJ was not resected. Histopathological analysis did not reveal an intraneural cyst (Fig. 2E). At the 6-month follow-up, the patient had normal dorsiflexion and eversion, which continued through the most recent follow-up, approximately 2 years after her operation. Case 3: Utilizing the Internal Topography of the Common Peroneal Nerve A 69-year-old man experienced the insidious onset of a partial right foot drop, which progressed to a complete foot drop over several months. Ultrasonography showed an intraneural peroneal cyst ( Fig. 3A and B). On our evaluation, the patient had minimal activation of the tibialis anterior muscle with near-complete preservation of peroneus muscle function. Four months after the ultrasound study, an MRI study was obtained and showed subtle signs of cyst within the common peroneal nerve and articular branch but appreciably smaller than seen on the previous ultrasound study (Fig. 3C and D). At surgery, the common peroneal nerve was neurolyzed at the fibular neck, and both the articular branch and STFJ were resected. 
By 1 month postoperatively, the patient had only trace weakness of dorsiflexion and was no longer requiring an anklefoot orthosis. Case 4: Intraosseous Cyst: Clue to an Intraneural Cyst? A 45-year-old man presented with a history of intermittent foot drop with multiple occurrences over the previous 7 years. One month prior to presentation, the patient had an episode of foot drop, but this had largely resolved by the time of our evaluation. EMG revealed evidence of a deep predominant peroneal neuropathy. Ultrasonography performed at the time of EMG reportedly showed a possible small intraneural cyst. Because of the suggestion of a small cyst on ultrasonography, an MRI/MR arthrogram was obtained. While most of the intraarticular gadolinium leaked from a ruptured popliteal cyst into the space surrounding the semimembranosus muscle (Fig. 4A), some of the injected gadolinium filled the intraosseous ganglion cyst, demonstrating the joint connection to the STFJ (Fig. 4B), and a small amount of contrast passed into the common peroneal nerve ( Fig. 4C and D), consistent with a small intraneural cyst. The patient underwent decompression of the peroneal nerve and resection of the articular branch and STFJ. The articular branch appeared enlarged and questionably cystic intraoperatively (Fig. 4E). Pathological examination confirmed the presence of intraneural ganglion cyst. One month postoperatively, the patient had normal dorsiflexion and eversion. Case 5: Abnormal STFJ and Tibialis Anterior Muscle Denervation: Clues to an Intraneural Cyst? A 64-year-old man developed the acute onset of right foot drop without any inciting event. On examination, dorsiflexion was 4-/5 and eversion was 5/5. An MRI study revealed degenerative changes in the STFJ with bone marrow edema in the fibular head and subacute denervation in the tibialis anterior muscle, but the images were read as negative for mass or cyst involving the peroneal nerve. 
Decompression of the common peroneal nerve was planned. With the patient under mild sedation, the common peroneal nerve was noted to be mildly cystic intraoperatively, so decompression was carried further distally and the trifurcation was uncovered. The articular branch was enlarged and cystic (Fig. 5A). The articular branch was dissected distally to the STFJ and resected. No resection of the STFJ was performed. Histological analysis revealed a fibrotic perineurium with reactive changes consistent with an intraneural cyst (Fig. 5B). On subsequent detailed review of the preoperative MRI study, subtle signs of the presence of an intraneural cyst were present, despite the study being read as negative ( Fig. 5C and D). Four months postoperatively, the patient had recovered normal dorsiflexion. Case 6: Does Size Matter? Small Cyst, Severe Symptoms A 32-year-old man developed acute lateral knee pain that radiated to the great toe after lifting a sofa. The next day he developed a foot drop. EMG showed a deep peroneal predominant neuropathy. An MRI study demonstrated a complex cyst arising from the STFJ with a question- able small peroneal intraneural component, an extraneural component, and an intravascular component ( Fig. 6A and B). On our evaluation, the patient had minimal activation of the tibialis anterior muscle with only trace weakness in the peroneus muscles. Given the possibility of an intraneural component of the cyst on MRI, an MR arthrogram was ordered. The MR arthrogram demonstrated again the complex cyst and showed contrast passing from the STFJ into an intraneural component of the cyst within the common peroneal nerve and its articular branch ( Fig. 6C and D). The patient did not elect to undergo surgery and has not been seen in follow-up. The Nearly Invisible Cyst as Part of the Roller Coaster Phenomenon The formation of intraneural ganglion cysts is a dynamic process. 
Our findings demonstrate that the life cycle of an intraneural ganglion can involve a phase in which it is nearly invisible. Several snapshots in time may capture dramatic fluctuations in size and configuration, spanning the spectrum and taking on the course of a roller coaster (Fig. 7). Based on serial imaging studies, Cases 1-3 provided the perfect setting to acknowledge the entity, the nearly invisible cyst. Cases 4-6 then provided appreciation of the diagnosis of the same entity in a single snapshot but with supplementary supportive evidence. The nearly invisible cyst described here would be consistent with the "occult" intraneural cyst that our group has recently demonstrated to be isolated to the articular branch of the lateral plantar nerve in a patient who underwent surgery for presumed tarsal tunnel syndrome. 4 We have previously alluded to spontaneous regression of cysts, but here we substantiate this concept and describe spontaneous regression to the point of near-complete resolution, without any evidence of cyst rupture. 8,13 Dynamic Cyst Morphology as Part of the Roller Coaster While these cases demonstrate that cyst size is dynamic, with cysts growing and shrinking over time possibly even to the point of resolution, the roller coaster phenomenon is not limited to size but also involves changes in morphology. It is not limited to the intraneural component but can also involve other compartments involved in the cyst such as the extraneural space and intravascular compartment. We have observed cases in which the intravascular compartment has shrunk to the point of complete resolution (Fig. 8). Interventions such as operative decompression or percutaneous aspiration also shift the dynamics that determine the size and morphology of the cyst such that postoperative recurrent intraneural cysts often take on a different size and morphology from the original cyst (Fig. 9). 
The morphology of the cyst is a result of constantly changing pressure differences within the STFJ and the compartments involved in the cyst (e.g., intraneural, intravascular, extraneural). Scar formation and inflammation generated by interventions and locoregional changes within the involved compartments, such as blood pressure for the intravascular compartment, potentially alter these pressure dynamics, allowing for alterations in size and morphology. The Nearly Invisible Cyst: The Tip of the Iceberg While once thought to be uncommon, peroneal intra-neural ganglion cysts are increasingly being recognized as an important cause of peroneal neuropathy, occurring in up to 18% of cases. 22 Given the number of patients we see with "negative" MRI findings who turn out to have nearly invisible cysts, even this may be underestimating the true frequency. We believe that a subgroup of patients of unknown size with peroneal neuropathy have unrecognized (nearly invisible) intraneural cysts. An Explanation for "Negative" MRI or Ultrasound Findings All of our cases had very subtle imaging findings, which were or could have been easily missed (explaining "negative" MRI or ultrasound findings), unless the suspicion was there a priori. Cases 1 and 2 clearly demonstrated that intraneural ganglion cysts can shrink dramatically, and, at times, rapidly. MR arthrography can be useful when subtle signs of the presence of an intraneural cyst are not completely convincing to demonstrate the passage of intraarticular contrast into the articular branch and confirm the presence of an intraneural cyst, as in Cases 2, 4, and 6. 5,6,12, 19 The articular branch of the tibial nerve in these studies serves as an internal control. Intraarticular contrast was not seen within this branch in these studies, confirming that passage of the contrast into the peroneal articular branch was pathologic. 
Imaging may be performed when the cyst happens to be at the nadir of the roller coaster, with only easy to miss, subtle signs on imaging. Particularly if the slice thickness is thick, the findings could be missed altogether. Experienced radiologists using optimal techniques and high-resolution imaging will improve the recognition of these nearly invisible cysts. Clues to the Presence of an Intraneural Cyst Additional clinical and imaging clues can be helpful in identifying nearly invisible cysts, even in cases of "negative" MRI findings. In Cases 2 and 3, the obvious presence of a cyst on ultrasound clued us in to examining the MRI studies in detail for signs of a cyst. However, we believe a number of other clinical and radiological findings can FIG. 9. A 49-year-old man presented with a complete foot drop and pain radiating from the knee to the great toe. MRI was performed (A and B). He underwent operative decompression of the common peroneal nerve and aspiration of the cyst at an outside institution. His cyst subsequently recurred and additional MRI was performed 2 months postoperatively (C and D). A and B: Coronal T2-weighted images with fat suppression showing cyst, within the common peroneal nerve (arrowhead, A) and within the articular branch (arrow, B). C and D: The cyst recurred. Two months postoperatively, coronal T2-weighted images with fat suppression were obtained, showing cyst within the common peroneal nerve (arrowhead) with a loculated appearance and morphology differing from the initial cyst (C) and cyst within the articular branch (arrow, D). be clues to the presence of an intraneural cyst, prompting in-depth review of the imaging studies. First, a number of these cases illustrate what we have anecdotally observed: peroneal intraneural cysts often present with deep peroneal predominance. Fluctuating symptoms, such as seen in Case 4, also seem to correlate with the presence of a cyst. 
23 From an imaging standpoint, the deep peroneal predominance can be observed with subacute denervation changes in the tibialis anterior muscle, such as in Cases 2, 3, and 5. The joint of origin is the STFJ, so imaging abnormalities in this joint should also prompt consideration of a cyst. Degenerative changes, joint edema, and intraosseous ganglion cysts, as seen in Cases 4 and 5, suggest an abnormal joint that may be at risk for capsular defects and cyst formation. These imaging findings are clues to scrutinize the articular branch and peroneal nerve for subtle evidence of an intraneural cyst. Frequently, when subtle cyst is observed, it is located only in the anterior portion of the common peroneal nerve, often at around the 12 o'clock position (Figs. 2 and 3). The internal topography of the common peroneal nerve has been described. 2,21 The fascicles of the articular branch run with the fascicles of the deep peroneal nerve in the anterolateral portion of the common peroneal nerve at the level of the fibular head. This is the place to look on MRI. The "clock face" configuration that we have previously described still applies. 20 On both MRI and MR arthrography, the critical positions to look for abnormalities correspond to locations about the clock face where the signet ring, transverse limb, and tail signs appear. In the case of nearly invisible cysts, these signs may not be fully developed, but cyst is present in those topographical locations. Understanding the "critical times" around the clock face often allows one to recognize a number of the important findings that we have described, all in a single MRI slice (Fig. 10). Subtle or Even Negative Pathology In Case 5, we demonstrate the subtle histological changes that can be seen, despite the articular branch being clearly cystic and enlarged at surgery. Despite imaging evidence of a clear intraneural cyst in Case 2, findings from the histopathological analysis of the articular branch specimen were negative. 
In truth, we believe that the full spectrum of an intraneural ganglion cyst can include negative histological findings that do not reveal cyst in the articular branch. A number of explanations can be postulated for this. It is plausible that the fluid track from its joint origin to the parent nerve is a potential space that allows passage of fluid and then shuts down. The articular branch might not always harbor a true cyst but rather might simply be a passageway. In other instances, a true cyst may form within the articular branch. Finding no evidence of a cyst might represent a sampling error. A true cyst might not be present throughout the entire articular branch, and the portion submitted for review might not contain cyst. Loss of cyst fluid may occur from the operative intervention or from the processing and handling of the specimen. Does the Identification of Small Intraneural Cysts Matter? The roller coaster of intraneural ganglion cysts has a number of implications. Does size matter relative to symptoms? We have observed a number of cases similar to Case 6, where small cysts are associated with severe neurological symptoms. One could interpret this as size not being a determinant in the severity of symptoms. However, it could be that the imaging study available captured the cyst at its nadir, while at its peak it was (and perhaps will be again) a large cyst. So, maybe it is that maximum size in the life of the cyst better correlates with symptoms. Alternatively, it may be that size does not matter relative to symptoms. This question remains unanswered. Small, nearly invisible, unrecognized cysts may contribute to peroneal neuropathy in a significant subset of patients and may be a possible cause of operative treatment failure in a subset of patients. Limitations The main limitation to this study is the cohort size. Each point is illustrated through a single case. A single case, however, is enough to prove the concepts proposed. 
What remains to be shown is the frequency at which each phenomenon occurs. Each case helps answer questions but also raises questions that should be systematically studied through larger investigations. While these examples help establish reasons that intraneural ganglion cysts may be underrecognized and often missed and provide clues that may help increase recognition, the study does not address treatment recommendations. When a cyst is identified preoperatively, we typically perform a peroneal nerve decompression with resection of both the articular branch and STFJ, but in several examples provided when the cyst was nearly invisible, we performed only resection of the articular branch without resection of the STFJ. When this option should be exercised is unclear. The pathology in these small cysts may still occur at the entrance to the fibular tunnel. The nerve FIG. 10. A 49-year-old man presented with a complete foot drop and numbness in the first dorsal web space. A: Axial T2-weighted MR image with fat suppression demonstrating subtle intraneural cyst (arrow) within the common peroneal nerve at the typical location of the signet ring sign (4-5 o'clock) and within the articular branch (curved arrow) at the typical location of the tail sign (11-12 o'clock). Extraneural cyst (double arrow) and subacute denervation changes in the tibialis anterior (asterisk) are also noted on this slice. Subtle cyst extended to the level of the sciatic nerve bifurcation in the anterior-most portion of the common peroneal nerve (not shown). B: To our surprise, at the time of surgery 7 weeks after the MR image was obtained, the intraneural cyst in the common peroneal nerve (blue loop) and in the articular branch (red loop) had significantly increased in size compared with what was observed on the image. may be slightly enlarged and, as a result, compressed by the deep fascia of the peroneus longus muscle. 
Decompression may be enough to resolve the symptoms despite not specifically addressing the cyst and source of the pathology. Additional work needs to be done to understand what recognition of nearly invisible cysts means for treatment and outcomes. Conclusions We demonstrate here that peroneal intraneural ganglion cysts ride a roller coaster of change in both size and morphology over time, and we introduce the nearly invisible cyst as one end of the spectrum. The fact that almost invisible cysts exist and are frequently missed on MRI means that intraneural ganglion cysts are likely an underrecognized cause of peroneal neuropathy. Clues to the presence of an intraneural cyst that we have identified may help increase the recognition of nearly invisible cysts.
/** * Created by luotuo on 17-6-30. */ @Service @Transactional("secondTransactionManager") public class PrivilegeConfigService { @Autowired private PrivilegeConfigRepository privilegeConfigRepository; @Autowired private UserPrivilegeService userPrivilegeService; @Autowired private RolePrivilegeService rolePrivilegeService; public List<PrivilegeConfig> findAll() { List<PrivilegeConfig> privilegeConfigs = privilegeConfigRepository.findAll(); return privilegeConfigs; } public List findAllTree() { // FIXME: We need a better way to build this tree in the future! // Find all roots first int level = 1; List<PrivilegeConfig> temp = privilegeConfigRepository.findByLevel(level); List<PrivilegeConfig> res = new ArrayList<>(); if (temp.size() > 0) { for (PrivilegeConfig p1 : temp) { res.add(p1); List<PrivilegeConfig> level2 = privilegeConfigRepository.findByPid(p1.getId()); if (level2.size() > 0) { for (PrivilegeConfig p2 : level2) { res.add(p2); List<PrivilegeConfig> level3 = privilegeConfigRepository.findByPid(p2.getId()); if (level3.size() > 0) { for (PrivilegeConfig p3 : level3) { res.add(p3); } } } } } } else { level = 2; temp = privilegeConfigRepository.findByLevel(level); if (temp.size() > 0) { for (PrivilegeConfig p2 : temp) { res.add(p2); List<PrivilegeConfig> level3 = privilegeConfigRepository.findByPid(p2.getId()); if (level3.size() > 0) { for (PrivilegeConfig p3 : level3) { res.add(p3); } } } } else { level = 3; temp = privilegeConfigRepository.findByLevel(level); return temp; } } return res; } public Iterable<PrivilegeConfig> findAll(int page, int pageSize) { Sort sort = new Sort(Sort.Direction.DESC, "id"); PageRequest pageRequest = new PageRequest(page, pageSize, sort); Iterable<PrivilegeConfig> privilegeConfigs = privilegeConfigRepository.findAll(pageRequest); return privilegeConfigs; } public PrivilegeConfig save(HttpServletRequest request) throws Exception { PrivilegeConfig privilegeConfig = new PrivilegeConfig(); try { long pid = 
Long.valueOf(request.getParameter("pid")); String name = request.getParameter("name"); String type = request.getParameter("type"); String value = request.getParameter("value"); String url = request.getParameter("url"); int level = getLevelByType(type); //int state = Integer.valueOf(request.getParameter("state")); int state = 1; String levelStr = getLevelStr(level); String stateStr = getStateStr(state); String platform = request.getParameter("platform"); privilegeConfig.setLevel(level); privilegeConfig.setLevel_str(levelStr); privilegeConfig.setName(name); privilegeConfig.setPid(pid); privilegeConfig.setPlatform(platform); privilegeConfig.setState(state); privilegeConfig.setState_str(stateStr); privilegeConfig.setType(type); privilegeConfig.setUrl(url); privilegeConfig.setValue(value); } catch (Exception e) { throw e; } privilegeConfig = privilegeConfigRepository.save(privilegeConfig); return privilegeConfig; } private int getLevelByType(String type) { int res = 1; if (type.equals("目录")) res = 1; else if (type.equals("菜单")) res = 2; else if (type.equals("按钮")) res = 3; return res; } private String getLevelStr(Integer level) { return level.toString() + "级"; } private String getStateStr(Integer state) { if (state == 1) return "正常"; else if (state == 0) return "停用"; else return "停用"; } public PrivilegeConfig update(HttpServletRequest request, long id) throws Exception { PrivilegeConfig privilegeConfig = privilegeConfigRepository.findById(id); if (privilegeConfig == null) privilegeConfig = new PrivilegeConfig(); try { long pid = Long.valueOf(request.getParameter("pid")); String name = request.getParameter("name"); String type = request.getParameter("type"); String value = request.getParameter("value"); String url = request.getParameter("url"); int level = getLevelByType(type); //int state = Integer.valueOf(request.getParameter("state")); int state = 1; String levelStr = getLevelStr(level); String stateStr = getStateStr(state); String platform = 
request.getParameter("platform"); privilegeConfig.setPid(pid); privilegeConfig.setLevel_str(levelStr); privilegeConfig.setLevel(level); privilegeConfig.setName(name); privilegeConfig.setType(type); privilegeConfig.setValue(value); privilegeConfig.setUrl(url); privilegeConfig.setState_str(stateStr); privilegeConfig.setState(state); privilegeConfig.setPlatform(platform); privilegeConfigRepository.save(privilegeConfig); } catch (Exception e) { throw e; } return privilegeConfig; } public PrivilegeConfig findById(Long id) { return privilegeConfigRepository.findById(id); } public void deleteById(Long id) { // Delete all children List<PrivilegeConfig> children = privilegeConfigRepository.findByPid(id); for (PrivilegeConfig p : children) { List<PrivilegeConfig> pp = privilegeConfigRepository.findByPid(p.getId()); if (pp.size() == 0) { privilegeConfigRepository.delete(p.getId()); } else { for (PrivilegeConfig pChild : pp) { deleteById(pChild.getId()); } } } privilegeConfigRepository.delete(id); } public Iterable<PrivilegeConfig> saveNew(List privileges) { return privilegeConfigRepository.save(privileges); } private int stateStr2Int(String state) { int res = 0; if (state.equals("正常")) res = 1; return res; } private int levelStr2Int(String state) { int res = 0; if (state.equals("1级")) res = 1; else if (state.equals("2级")) res = 2; return res; } public List<PrivilegeConfig> findByPid(long pid) { return privilegeConfigRepository.findByPid(pid); } public List<PrivilegeConfig1> getByPidAndUserId(long pid, long userId) { List<UserPrivilege> userPrivileges = userPrivilegeService.findByUserId(userId); List<PrivilegeConfig> privilegeConfigs = privilegeConfigRepository.findByPid(pid); List<PrivilegeConfig1> res = new ArrayList<>(); for (PrivilegeConfig p : privilegeConfigs) { PrivilegeConfig1 privilegeConfig1 = new PrivilegeConfig1(); privilegeConfig1.set(p, false); for (UserPrivilege u : userPrivileges) { if (p.getId() == u.getPrivilege_id()) privilegeConfig1.setChecked(true); } 
res.add(privilegeConfig1); } return res; } public List<PrivilegeConfig1> getByPidAndRoleId(long pid, long roleId) { List<RolePrivilege> rolePrivileges = rolePrivilegeService.findByRoleId(roleId); List<PrivilegeConfig> privilegeConfigs = privilegeConfigRepository.findByPid(pid); List<PrivilegeConfig1> res = new ArrayList<>(); for (PrivilegeConfig p : privilegeConfigs) { PrivilegeConfig1 privilegeConfig1 = new PrivilegeConfig1(); privilegeConfig1.set(p, false); for (RolePrivilege r : rolePrivileges) { if (p.getId() == r.getPrivilege_id()) privilegeConfig1.setChecked(true); } res.add(privilegeConfig1); } return res; } public List<PrivilegeConfig> getByIds(List<Long> ids) { return privilegeConfigRepository.findByIds(ids); } public List<PrivilegeConfig> getByIdsAndLevel(List<Long> ids, int level) { return privilegeConfigRepository.findByIdsAndLevel(ids, level); } public List<PrivilegeConfig> getByIdsAndPid(List<Long> ids, Long pid) { return privilegeConfigRepository.findByIdsAndPid(ids, pid); } }
pub struct Abv; impl Abv { pub fn calculate_abv(&self, og: f32, fg: f32) -> f32 { (og - fg) * 131.25 } }
When Samoa Air last week announced it was going to start charging people for airline tickets based on their weight — a concept that Samoa Air CEO Chris Langton defended in news reports as “the fairest way of traveling with your family or yourself” — it set off a flurry of comments, some supportive, some not. Under Samoa Air’s plan, customers are now required to estimate their weight when they book their flight online, and then to weigh in when they arrive at the airport. That number determines the price they will pay and also how much space they will get once they board the plane. “You travel happy, knowing full well that you are only paying for exactly what you weigh … nothing more,” the Samoa Air website says. Established in 2012 as the national carrier of Samoa, the airline connects the Pacific Ocean islands domestically using small propeller planes that seat between three and 10 people. It recently began offering international flights on larger planes to American Samoa. Estimates of price per pound vary with the length of the trip. According to The Wall Street Journal, customers flying to American Samoa will pay 92 U.S. cents a kilogram, or 42 cents a pound, for each flight. A kilogram equals 2.2 pounds. “It’s a pay by weight system and it’s here to stay,” Langton told the Journal. As an example of the new policy, he estimated that a 160-kilogram person on Samoa Air will pay four times as much as a 40-kilogram person, but he or she would also get more space. One reason for the new policy may be the fact that Samoa has the fourth highest obesity rate in the world. Estimates of the percentage of obese people in the population range from 55% to 60%. The ‘Private Lives’ of Customers Although Langton says that the new policy has received a favorable reaction from consumers, it raises some interesting questions, including: Is this discrimination or a smart business model? Are there better ways to achieve the same objective? 
And will other airlines adopt a similar approach? Wharton marketing professor John Zhang, acknowledging that he is not a lawyer, suggests that charging passengers based on their weight does not fit the legal definition of discrimination based on race, age, sex, nationality, religion or handicap. Indeed, he says, “a small airliner like Samoa with propeller planes can make a legitimate business case to charge passengers based on their weight: A small plane can reach carrying capacity either because it is cubed out [i.e., running out of space] or grossed out [i.e., reaching its weight limit]. In the case of carrying passengers, both dimensions can be maxed out to the detriment of an airline’s profitability.” Samoa Air’s new policy does, however, suggest possible price discrimination given that different customers with different characteristics are charged different prices, Zhang says, although he does not believe that “price discrimination is the main motivation behind the new pricing policy — for two reasons. First, there is no evidence that the travelers who weigh more tend to be less price-sensitive so that a firm could make more profits by charging them more. Second, there are many other ways to do price discrimination more effectively and efficiently if capacity is not an issue.” The fact of the matter is that “petite passengers have been subsidizing overweight passengers when the ticket price is weight blind,” Zhang notes. “Such a practice becomes untenable or even unfair when 55% of a country’s adult population is obese.” Wharton health care management professor Mark V. 
Pauly makes the point that “in small planes, total weight does matter, so rather than carry fewer passengers, it seems that paying for excess weight is tailoring price to cost; it costs the airline more to take a person who weighs twice as much and so displaces another regular-sized paying customer.” Although people will object, he says, “in an unregulated but competitive market, the general principle is that firms can set the terms of a transaction, and buyers are free to accept [those terms] or not. The key will be if there is competition among airlines or other methods of transportation [so that] people have an alternative. I’m not sure how it is different from charging you for another seat if your cello flies with you.” Samoa’s model is “an example of businesses trying to maximize profits,” Pauly adds. “When you sell services, the cost often depends on the ‘private lives’ of customers … like whether they use their rental car for racing. So this may make sense…. Ethicists will doubtless be upset, but I do not think economists generally will.” The Perception of Fairness Given the somewhat controversial nature of Samoa Air’s move, are there other ways the airline could achieve its goal without focusing on a person’s weight? As Wharton marketing professor Gal Zauberman notes, “consumers will likely react negatively to having to pay for something that they see as part of who they are.” From the perspective of a “behavioral researcher, what you have to think about is whether there is a better way to do the same thing but frame it differently,” he says. “One option is instead of charging per pound, have a baseline price design for heavy people and then give discounts either in the form of a reduced fare or extras, such as free checked luggage, to those who weigh less.” If you do want to charge per pound, he adds, “another option might be that the price is for your total weight, not just how much you personally weigh. 
You will be allowed a max weight of yourself, your carry on and your checked luggage. If you are a heavy person with only a small carry on, you might pay less than the small guy with the large suitcase. This will make your weight less explicit while accomplishing the same objective.” Under the new policy, passengers with baggage that is over the weight limit pay at the same rate as their personal weight. Wharton marketing professor Deborah Small agrees that there may be “more tactful ways” to accomplish the same goal. For example, Samoa Air could have child discounts (in fact, children under 12 are charged 75% of the adult rate) “or they could even have discounts for low-weight people. The point is that it’s achieving the same objective but framing it in a way that is perceived as fairer and that doesn’t penalize an already stigmatized group in society.” She also points out that the new policy “is a form of price discrimination which is common in many forms,” including student discounts for tickets, early bird specials offered by restaurants on weeknights and dynamic pricing by airlines based on when consumers buy their tickets. “What’s interesting to note about such discrimination is how it is perceived by customers.” Small recently taught a class in which students discussed the Coca-Cola Company’s plans to use vending machines that would change the price of a can of soda depending on the weather. On warmer days, the price would go up, and on colder days, it would go down. “That makes perfect sense from an economic [standpoint],” Small says, “but customers were very upset that the company would take advantage of their thirst on hot days.” The launch was called off. Consumers have “a relationship with firms, and their expectations are much like their expectations in any interpersonal relationships,” Small says. 
“When a firm acts in a way that violates the social rule of treating people fairly, consumers get offended.” So it’s fine for a restaurant to offer a discount to people on a relatively uncrowded Tuesday, but if the restaurant tried to add a surcharge to people eating there on a busy Saturday, that would be considered unfair. “The fairness of these [deals] is not objective. It’s subjective,” she adds. “It’s hard to say if something is truly fair. It’s whether the customer perceives it to be fair. Often just the way you frame a pricing scheme affects the perception of fairness.” In a comment made to Reuters news agency last week, Samoa Air’s Langton suggested that “the industry has this concept that all people throughout the world are the same size,” adding that airplanes “always run on weight, irrespective of seats.” He also noted that, in some cases — including families with small children — the new policy could make it cheaper to fly. Mass Weigh-in Is it likely other airlines will follow Samoa Air’s strategy? Zauberman thinks not. “The key is that this is a small airline, and implementing this on a large scale, for airlines like the new United or Delta, will be much more complicated and unlikely to catch on any time soon.” Zhang agrees, mainly because the modern jet passenger airplanes “are rarely cubed out or grossed out — the load factor for passenger airlines is about 80% today — and it is a lot easier to focus on inventing new fees.” The Journal article suggests that the major airlines would avoid this type of strategy because of concerns over price discrimination, but also points out that some carriers in the U.S., including Southwest Airlines, “require passengers who can’t fit in a regular coach seat to buy an extra ticket when flights are full. They don’t, however, charge passengers per kilo or pound.” Other news reports suggest the nightmare that would ensue if, on large crowded flights, hundreds of passengers were required to weigh in at the airport. 
Is Samoa Air’s new policy one more example of a company inserting itself too far into people’s health and habits? Companies, Zhang says, “are in the business of interfering with people’s private lives.”
#!/usr/bin/env node import * as fs from 'fs' import {analyzePackage} from './index' const output = analyzePackage(fs.realpathSync(process.argv[2])) console.log(JSON.stringify(output, undefined, 4))
This—unlike that stupid dress—is one of those Internet puzzles that actually delivers an answer in about five seconds. Did you guess the right one? Probably not, because these are some hyper-realistic drawings from Wales artist Howard Lee. He does a ton of them.
Ginsenoside Rc Is a New Selective UGT1A9 Inhibitor in Human Liver Microsomes and Recombinant Human UGT Isoforms Ginseng is known to have inhibitory effects on UGT1A9 activity. However, little is known about the inhibitory effects of ginsenosides, the major active compounds in ginseng, on UGT1A9 activity. In vitro investigation of UGT1A9 inhibition by ginsenosides was carried out using human liver microsomes (HLMs). Among 10 ginsenosides, ginsenoside Rc was the strongest inhibitor of UGT1A9-mediated mycophenolic acid glucuronidation activity. Further inhibition kinetic studies using HLMs suggested that ginsenoside Rc competitively and noncompetitively inhibited UGT1A9-mediated propofol and mycophenolic acid glucuronidation activities, with Ki values of 2.83 and 3.31 μM, respectively. Next, to investigate whether the inhibitory effect of ginsenoside Rc is specific to the UGT1A9 isoform, we studied the inhibitory potency of ginsenoside Rc on nine human uridine diphospho-glucuronosyltransferase (UGT) activities using recombinant human UGT isoforms. Ginsenoside Rc exhibited a 12.9-fold selectivity (which was similar to niflumic acid at 12.5-fold) for UGT1A9 inhibition. Ginsenoside Rc at 50 μM also inhibited none of the other UGT isoform-specific activities above 12.0%, except for UGT1A9 (>91.5%) in HLMs, indicating that ginsenoside Rc might be used as a selective UGT1A9 inhibitor in reaction phenotyping studies of new chemical entities. Considering lower plasma concentrations (0.01 μM) of ginsenoside Rc in healthy subjects and no induction potential on UGT isoforms, ginsenoside Rc does not cause pharmacokinetic drug interactions with other coadministered drugs metabolized by UGT1A9. SIGNIFICANCE STATEMENT Ginsenoside Rc selectively inhibited UGT1A9-mediated propofol and mycophenolic acid glucuronidation activities in human liver microsomes and recombinant uridine diphospho-glucuronosyltransferase (UGT) isoforms. It exhibited a 12.9-fold selectivity for UGT1A9 inhibition.
Therefore, ginsenoside Rc, like niflumic acid, might be used as a selective UGT1A9 inhibitor in reaction phenotyping studies of new chemical entities.
/** * Sets the name of the column in the database that holds the left ids in * the relation. */ public Builder<L, R> leftColumn(String leftColumnName) { Objects.requireNonNull(leftColumnName); this.leftColumn = leftColumnName; return this; }
<filename>NSColor_SKExtensions.h // // NSColor_SKExtensions.h // Skim // // Created by <NAME> on 6/17/07. /* This software is Copyright (c) 2007-2017 <NAME>. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of <NAME> nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #import <Cocoa/Cocoa.h> enum { SKScriptingColorRed = 'Red ', SKScriptingColorGreen = 'Gren', SKScriptingColorBlue = 'Blue', SKScriptingColorYellow = 'Yelw', SKScriptingColorMagenta = 'Mgnt', SKScriptingColorCyan = 'Cyan', SKScriptingColorDarkRed = 'DRed', SKScriptingColorDarkGreen = 'DGrn', SKScriptingColorDarkBlue = 'DBlu', SKScriptingColorBanana = 'Bana', SKScriptingColorTurquoise = 'Turq', SKScriptingColorViolet = 'Viol', SKScriptingColorOrange = 'Orng', SKScriptingColorDeepPink = 'DpPk', SKScriptingColorSpringGreen = 'SprG', SKScriptingColorAqua = 'Aqua', SKScriptingColorLime = 'Lime', SKScriptingColorDarkViolet = 'DVio', SKScriptingColorPurple = 'Prpl', SKScriptingColorTeal = 'Teal', SKScriptingColorOlive = 'Oliv', SKScriptingColorBrown = 'Brwn', SKScriptingColorBlack = 'Blck', SKScriptingColorWhite = 'Whit', SKScriptingColorGray = 'Gray', SKScriptingColorLightGray = 'LGry', SKScriptingColorDarkGray = 'DGry', SKScriptingColorClear = 'Clea' }; @interface NSColor (SKExtensions) + (NSColor *)keySourceListHighlightColor; + (NSColor *)mainSourceListHighlightColor; + (NSColor *)disabledSourceListHighlightColor; + (NSColor *)mainSourceListBackgroundColor; + (NSColor *)sourceListHighlightColorForView:(NSView *)view; - (NSComparisonResult)colorCompare:(NSColor *)aColor; - (void)drawSwatchInRoundedRect:(NSRect)rect; + (id)scriptingRgbaColorWithDescriptor:(NSAppleEventDescriptor *)descriptor; - (id)scriptingRgbaColorDescriptor; - (NSString *)accessibilityValue; - (NSString *)hexString; @end #if !defined(MAC_OS_X_VERSION_10_8) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8 @interface NSColor (SKMountainLionDeclarations) - (CGColorRef)CGColor; @end #endif
An all-CMOS low supply voltage temperature sensor front-end with error correction techniques This paper presents an all-CMOS temperature sensor front-end operating at low supply voltage. The front-end includes a reference voltage generator and a proportional-to-absolute-temperature (PTAT) voltage generator. In order to minimize the errors due to various mismatches, error correction techniques including gain boosting, dynamic element matching, dynamic offset cancellation and clock boosting have been investigated. Detailed analysis and simulation have been presented. Furthermore, experimental results with a 0.5 μm implementation demonstrated that the lowest supply voltage is 1.1 V for the reference voltage generator and 1 V for the PTAT voltage generator over the temperature range of -55°C to 125°C with acceptable performance for a CMOS-based temperature sensor front-end. The effectiveness of the error correction techniques is also demonstrated experimentally.
Martensitic Transformation in a Ti50ni48fe2 Alloy Studied by Eels Martensitic transformation is the first-order diffusionless phase transformation in solids, and is responsible for unique phenomena such as shape memory effect and superelasticity. Recently, many researchers tried to explain the origin of martensitic transformations from a viewpoint of electronic structure. It is strongly required to investigate electronic state changes associated with a martensitic transformation by an accurate experiment. EELS (Electron Energy-Loss Spectroscopy) was successfully applied to study electronic state in several alloys and oxides, e.g. a composition dependence of density-of-state (DOS) in Cu-3d band was clearly observed for Cu1-xAlx alloys by a core-loss measurement. However, this method has been less applied to studies of phase transformations to date. The purpose of the present work is to investigate electronic state changes associated with a martensitic transformation in a Ti50Ni48Fe2 alloy by EELS. A Ti50Ni48Fe2 alloy was prepared by induction-melting method. Thin-foiled specimens were solution-treated in Ar atmosphere at 1173K for 1hr.,
# encoding: UTF-8

from datetime import datetime, timedelta
import json
from collections import OrderedDict

import pymongo

from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from ctaConstant import *
from ctaObject import *
from ctaStrategies import strategyClassDict
from ctaStrategyTemplate import TestStrategy
from ctaHistoryData import MINUTE_DB_NAME


########################################################################
class BacktestingEngine(object):
    """
    CTA backtesting engine.

    Exposes the same method interface as the live strategy engine so that
    the exact same strategy code can go from backtesting to live trading
    unchanged.
    """
    
    TICK_MODE = 'tick'  # replay tick-by-tick data
    BAR_MODE = 'bar'    # replay bar (K-line) data

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        # Counter used to generate local stop-order ids
        self.stopOrderCount = 0
        # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        
        # Local stop-order dictionaries:
        # key is stopOrderID, value is the StopOrder object
        self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict
        
        # Backtesting-related state
        self.strategy = None        # strategy instance being backtested
        self.mode = self.BAR_MODE   # replay mode, bar data by default
        
        self.dbClient = None        # database client
        self.dbCursor = None        # database query cursor
        
        self.historyData = []       # list of historical data, used for backtesting
        self.initData = []          # data used for strategy initialization
        self.backtestingData = []   # data replayed during the backtest
        
        self.dataStartDate = None       # start date of the backtest data (datetime object)
        self.strategyStartDate = None   # strategy start date (data before it is used
                                        # for initialization), also a datetime object
        
        self.limitOrderDict = {}            # all limit orders ever sent
        self.workingLimitOrderDict = {}     # working limit orders, used for fill matching
        self.limitOrderCount = 0            # limit-order id counter
        
        self.tradeCount = 0     # trade id counter
        self.tradeDict = {}     # all simulated trades
        
        # Latest market data, used to simulate fills
        self.tick = None
        self.bar = None
        self.dt = None      # latest timestamp
    
    #----------------------------------------------------------------------
    def setStartDate(self, startDate='20100416', initDays=30):
        """Set the start date of the backtest.

        startDate: first day of data to load, as a '%Y%m%d' string.
        initDays: number of leading days reserved for strategy initialization.
        """
        self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
        initTimeDelta = timedelta(initDays)
        self.strategyStartDate = self.dataStartDate + initTimeDelta
    
    #----------------------------------------------------------------------
    def setBacktestingMode(self, mode):
        """Set the replay mode (TICK_MODE or BAR_MODE)."""
        self.mode = mode
    
    #----------------------------------------------------------------------
    def loadHistoryData(self, dbName, symbol):
        """Load historical data for the symbol from MongoDB."""
        self.output(u'开始载入数据')
        
        # First pick the data class according to the replay mode
        if self.mode == self.BAR_MODE:
            dataClass = CtaBarData
        else:
            dataClass = CtaTickData
        
        # Query the database
        self.dbClient = pymongo.MongoClient()
        collection = self.dbClient[dbName][symbol]
        flt = {'datetime':{'$gte':self.dataStartDate}}  # data filter condition
        self.dbCursor = collection.find(flt)
        
        # Read the data out of the cursor, splitting it into the
        # initialization list and the replay list
        for d in self.dbCursor:
            data = dataClass()
            data.__dict__ = d
            if data.datetime < self.strategyStartDate:
                self.initData.append(data)
            else:
                self.backtestingData.append(data)
        
        self.output(u'载入完成,数据量%s' %len(self.backtestingData))
    
    #----------------------------------------------------------------------
    def runBacktesting(self):
        """Run the backtest by replaying the loaded data into the strategy."""
        self.strategy.start()
        
        if self.mode == self.BAR_MODE:
            for data in self.backtestingData:
                self.newBar(data)
        else:
            for data in self.backtestingData:
                self.newTick(data)
    
    #----------------------------------------------------------------------
    def newBar(self, bar):
        """Handle a new bar: match pending orders, then notify the strategy."""
        self.bar = bar
        self.crossLimitOrder()      # match limit orders first
        self.crossStopOrder()       # then match stop orders
        self.strategy.onBar(bar)
    
    #----------------------------------------------------------------------
    def newTick(self, tick):
        """Handle a new tick: match pending orders, then notify the strategy."""
        self.tick = tick
        self.crossLimitOrder()
        self.crossStopOrder()
        self.strategy.onTick(tick)
    
    #----------------------------------------------------------------------
    def initStrategy(self, name, strategyClass, paramDict=None):
        """Instantiate the strategy class with this engine as its context."""
        self.strategy = strategyClass(self, name, paramDict)
    
    #----------------------------------------------------------------------
    def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
        """Send a (simulated) limit order; returns the local order id."""
        self.limitOrderCount += 1
        orderID = str(self.limitOrderCount)
        
        order = VtOrderData()
        order.vtSymbol = vtSymbol
        order.price = price
        order.totalVolume = volume
        order.status = STATUS_NOTTRADED     # just submitted, not traded yet
        order.orderID = orderID
        order.vtOrderID = orderID
        order.orderTime = str(self.dt)
        
        # Map the CTA order type onto direction and offset
        if orderType == CTAORDER_BUY:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_SELL:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_CLOSE
        elif orderType == CTAORDER_SHORT:
            order.direction = DIRECTION_SHORT
            order.offset = OFFSET_OPEN
        elif orderType == CTAORDER_COVER:
            order.direction = DIRECTION_LONG
            order.offset = OFFSET_CLOSE
        
        # Store the order in the limit-order dictionaries
        self.workingLimitOrderDict[orderID] = order
        self.limitOrderDict[orderID] = order
        
        return orderID
    
    #----------------------------------------------------------------------
    def cancelOrder(self, vtOrderID):
        """Cancel a working limit order."""
        if vtOrderID in self.workingLimitOrderDict:
            order = self.workingLimitOrderDict[vtOrderID]
            order.status = STATUS_CANCELLED
            order.cancelTime = str(self.dt)
            del self.workingLimitOrderDict[vtOrderID]
    
    #----------------------------------------------------------------------
    def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
        """Send a stop order (implemented locally); returns the stop-order id."""
        self.stopOrderCount += 1
        stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
        
        so = StopOrder()
        so.vtSymbol = vtSymbol
        so.price = price
        so.volume = volume
        so.strategy = strategy
        so.stopOrderID = stopOrderID
        so.status = STOPORDER_WAITING
        
        if orderType == CTAORDER_BUY:
            so.direction = DIRECTION_LONG
            so.offset = OFFSET_OPEN
        elif orderType == CTAORDER_SELL:
            so.direction = DIRECTION_SHORT
            so.offset = OFFSET_CLOSE
        elif orderType == CTAORDER_SHORT:
            so.direction = DIRECTION_SHORT
            so.offset = OFFSET_OPEN
        elif orderType == CTAORDER_COVER:
            so.direction = DIRECTION_LONG
            so.offset = OFFSET_CLOSE
        
        # Store the StopOrder object in both dictionaries
        self.stopOrderDict[stopOrderID] = so
        self.workingStopOrderDict[stopOrderID] = so
        
        return stopOrderID
    
    #----------------------------------------------------------------------
    def cancelStopOrder(self, stopOrderID):
        """Cancel a working stop order."""
        # Check whether the stop order exists
        if stopOrderID in self.workingStopOrderDict:
            so = self.workingStopOrderDict[stopOrderID]
            so.status = STOPORDER_CANCELLED
            del self.workingStopOrderDict[stopOrderID]
    
    #----------------------------------------------------------------------
    def crossLimitOrder(self):
        """Match working limit orders against the latest data."""
        # First determine the prices at which orders would cross
        if self.mode == self.BAR_MODE:
            buyCrossPrice = self.bar.low        # a buy limit order priced above this level fills
            sellCrossPrice = self.bar.high      # a sell limit order priced below this level fills
        else:
            buyCrossPrice = self.tick.lastPrice
            sellCrossPrice = self.tick.lastPrice
        
        # Walk through every working limit order
        # (Python 2 items() returns a list copy, so deleting inside the loop is safe)
        for orderID, order in self.workingLimitOrderDict.items():
            # Check whether the order crosses
            buyCross = order.direction==DIRECTION_LONG and order.price>=buyCrossPrice
            sellCross = order.direction==DIRECTION_SHORT and order.price<=sellCrossPrice
            
            # If the order filled
            if buyCross or sellCross:
                # Push the trade data
                self.tradeCount += 1            # trade id increments by 1
                tradeID = str(self.tradeCount)
                trade = VtTradeData()
                trade.vtSymbol = order.vtSymbol
                trade.tradeID = tradeID
                trade.vtTradeID = tradeID
                trade.orderID = order.orderID
                trade.vtOrderID = order.orderID
                trade.direction = order.direction
                trade.offset = order.offset
                trade.price = order.price       # fills at the limit price
                trade.volume = order.totalVolume
                trade.tradeTime = str(self.dt)
                self.strategy.onTrade(trade)
                self.tradeDict[tradeID] = trade
                
                # Push the order data
                order.tradedVolume = order.totalVolume
                order.status = STATUS_ALLTRADED
                self.strategy.onOrder(order)
                
                # Remove the filled limit order from the working dict
                del self.workingLimitOrderDict[orderID]
    
    #----------------------------------------------------------------------
    def crossStopOrder(self):
        """Match working stop orders against the latest data."""
        # First determine the crossing prices; the rule here is the
        # opposite of the limit-order rule
        if self.mode == self.BAR_MODE:
            buyCrossPrice = self.bar.high       # a buy stop order priced below this level triggers
            sellCrossPrice = self.bar.low       # a sell stop order priced above this level triggers
        else:
            buyCrossPrice = self.tick.lastPrice
            sellCrossPrice = self.tick.lastPrice
        
        # Walk through every working stop order
        for stopOrderID, so in self.workingStopOrderDict.items():
            # Check whether the stop order triggers
            buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
            sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
            
            # If the stop order triggered
            if buyCross or sellCross:
                # Push the trade data
                self.tradeCount += 1            # trade id increments by 1
                tradeID = str(self.tradeCount)
                trade = VtTradeData()
                trade.vtSymbol = so.vtSymbol
                trade.tradeID = tradeID
                trade.vtTradeID = tradeID
                # A triggered stop order consumes a limit-order id as well,
                # mimicking the stop order turning into a real order
                self.limitOrderCount += 1
                orderID = str(self.limitOrderCount)
                trade.orderID = orderID
                trade.vtOrderID = orderID
                trade.direction = so.direction
                trade.offset = so.offset
                trade.price = so.price          # fills at the stop price
                trade.volume = so.volume
                trade.tradeTime = str(self.dt)
                self.strategy.onTrade(trade)
                self.tradeDict[tradeID] = trade
                
                # Push the order data
                so.status = STOPORDER_TRIGGERED
                order = VtOrderData()
                order.vtSymbol = so.vtSymbol
                order.symbol = so.vtSymbol
                order.orderID = orderID
                order.vtOrderID = orderID
                order.direction = so.direction
                order.offset = so.offset
                order.price = so.price
                order.totalVolume = so.volume
                order.tradedVolume = so.volume
                order.status = STATUS_ALLTRADED
                order.orderTime = trade.tradeTime
                self.strategy.onOrder(order)
                
                # Remove the triggered stop order from the working dict
                del self.workingStopOrderDict[stopOrderID]
    
    #----------------------------------------------------------------------
    def insertData(self, dbName, collectionName, data):
        """No-op: inserting into the database is not allowed during a
        backtest; this stub keeps live-trading code paths from failing."""
        pass
    
    #----------------------------------------------------------------------
    def loadBar(self, dbName, collectionName, startDate):
        """Directly return the bars from the initialization data list."""
        return self.initData
    
    #----------------------------------------------------------------------
    def loadTick(self, dbName, collectionName, startDate):
        """Directly return the ticks from the initialization data list."""
        return self.initData
    
    #----------------------------------------------------------------------
    def getToday(self):
        """Return the datetime object representing 'today'.

        Mainly used during daily initialization to determine the date, so
        callers know from which day onwards to read earlier data. Here the
        strategy start date is used.
        """
        return self.strategyStartDate
    
    #----------------------------------------------------------------------
    def writeCtaLog(self, content):
        """Write a log entry (printed to stdout in backtesting)."""
        print content
    
    #----------------------------------------------------------------------
    def output(self, content):
        """Print engine output."""
        print content
    

#----------------------------------------------------------------------
def test():
    """Smoke test: run the TestStrategy over IF0000 minute bars."""
    engine = BacktestingEngine()
    engine.setBacktestingMode(engine.BAR_MODE)
    engine.initStrategy(u'测试', TestStrategy)
    engine.setStartDate()
    engine.loadHistoryData(MINUTE_DB_NAME, 'IF0000')
    engine.runBacktesting()
<reponame>allenfancy/com.allen.enhance package org.com.allen.enhance.basic.desginpattern.abstractfactory; public class ManFactory implements HumanFactory { @Override public Human createYellowHuman() { return new ManYellowHuman(); } @Override public Human createBlackHuman() { return new ManBlackHuman(); } @Override public Human createWhiteHuman() { return new ManWhiteHuman(); } }
class PipelineEvent:
    """One event emitted by the ZeffClient pipeline.

    The pipeline will generate record configuration, build a record from
    the configuration, validate the record, and then upload the record to
    Zeff Cloud.  Each event wraps a ``logging.LogRecord`` together with
    the pipeline phase that produced it.
    """

    def __init__(self, phase: PipelinePhase, record: logging.LogRecord):
        self.__phase = phase
        self.__record = record

    def __str__(self):
        stamp = self.timestamp.isoformat()
        return f"{stamp} [{self.phase.name}] {self.level.name}: {self.message}"

    @property
    def timestamp(self) -> datetime.datetime:
        """When the event was emitted (from the log record's creation time)."""
        return datetime.datetime.fromtimestamp(self.__record.created)

    @property
    def phase(self) -> PipelinePhase:
        """Pipeline phase that generated this event."""
        return self.__phase

    @property
    def level(self) -> PipelineLevel:
        """Pipeline level for this event.

        The first level (in ``PipelineLevel`` iteration order) whose value
        does not exceed the record's ``levelno``; ``Debug`` otherwise.
        """
        matching = (lvl for lvl in PipelineLevel if lvl.value <= self.__record.levelno)
        return next(matching, PipelineLevel.Debug)

    @property
    def message(self) -> str:
        """Message carried by the underlying log record."""
        return self.__record.getMessage()
1. Field of the Invention The present invention generally relates to keyboards used for inputting operations of computer apparatuses. More specifically, the present invention relates to a keyboard having a light emitting function which can be operated even under dark environments. 2. Description of the Related Art Conventionally, keyboards which can be operated even in dark environments have been developed. A keyboard using EL (electroluminescence) elements is suggested in, for example, Japanese Laid-Open Patent Application Publication No. 2002-251937. In this keyboard, a character, a mark, or the like is printed on an upper surface of a key top. An elastic member and a link mechanism are provided at a lower part of the key top. A reinforcing plate and a membrane sheet having a contact point are provided under the elastic member and the link mechanism. Light is irradiated from a rear side of the key top by the EL element provided at a lower part of the membrane sheet. The light is irradiated from the rear side of the key top so that the character, mark, or the like printed on the upper surface of the key top is brightly luminous. As a result of this, the character, the mark, or the like printed on the upper surface of the key top can be visually recognized even in a dark environment. Keyboards having LEDs (light emitting diodes) and light guide plates are suggested in, for example, Japanese Laid-Open Patent Application Publication No. 6-22017 and Japanese Laid-open Patent Application Publication No. 2007-280810. While a lighting operating life of the EL element is approximately 3000 hours which is relatively short, it is possible to continuously use the LED for a long time. In these keyboards, characters or the like printed on the key tops are luminous. However, in the keyboard suggested in Japanese Laid-Open Patent Application Publication No. 
2002-251937, in addition to there being a problem of the lighting operating life of the EL element, an area is required where a driving electric source is arranged because a high voltage is used. Accordingly, the keyboard may be large so that the keyboard may be heavy. Furthermore, in the keyboards described in Japanese Laid-Open Patent Application Publication No. 6-22017 and Japanese Laid-Open Patent Application Publication No. 2007-280810, light emitted by the corresponding LEDs is incident on side surfaces facing each other of the light guide plate. Accordingly, in order to obtain even and sufficient luminance, it is necessary to provide two boards or the like where the LEDs are arranged, left and right or up and down. Therefore, the keyboard may be large so that the keyboard may be heavy. It is desirable that a keyboard used for the notebook-type computer apparatus be as small as possible.
use crate::routes::setting::{RankingOrdering, SettingBody}; use indexmap::IndexMap; use log::*; use meilidb_core::criterion::*; use meilidb_core::Highlight; use meilidb_core::{Index, RankedMap}; use meilidb_schema::{Schema, SchemaAttr}; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::cmp::Ordering; use std::collections::{HashMap, HashSet}; use std::convert::From; use std::error; use std::fmt; use std::time::{Duration, Instant}; #[derive(Debug)] pub enum Error { SearchDocuments(String), RetrieveDocument(u64, String), DocumentNotFound(u64), CropFieldWrongType(String), AttributeNotFoundOnDocument(String), AttributeNotFoundOnSchema(String), MissingFilterValue, UnknownFilteredAttribute, Internal(String), } impl error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use Error::*; match self { SearchDocuments(err) => write!(f, "impossible to search documents; {}", err), RetrieveDocument(id, err) => write!( f, "impossible to retrieve the document with id: {}; {}", id, err ), DocumentNotFound(id) => write!(f, "document {} not found", id), CropFieldWrongType(field) => { write!(f, "the field {} cannot be cropped it's not a string", field) } AttributeNotFoundOnDocument(field) => { write!(f, "field {} is not found on document", field) } AttributeNotFoundOnSchema(field) => write!(f, "field {} is not found on schema", field), MissingFilterValue => f.write_str("a filter doesn't have a value to compare it with"), UnknownFilteredAttribute => { f.write_str("a filter is specifying an unknown schema attribute") } Internal(err) => write!(f, "internal error; {}", err), } } } impl From<meilidb_core::Error> for Error { fn from(error: meilidb_core::Error) -> Self { Error::Internal(error.to_string()) } } pub trait IndexSearchExt { fn new_search(&self, query: String) -> SearchBuilder; } impl IndexSearchExt for Index { fn new_search(&self, query: String) -> SearchBuilder { SearchBuilder { index: self, query, offset: 
0, limit: 20, attributes_to_crop: None, attributes_to_retrieve: None, attributes_to_search_in: None, attributes_to_highlight: None, filters: None, timeout: Duration::from_millis(30), matches: false, } } } pub struct SearchBuilder<'a> { index: &'a Index, query: String, offset: usize, limit: usize, attributes_to_crop: Option<HashMap<String, usize>>, attributes_to_retrieve: Option<HashSet<String>>, attributes_to_search_in: Option<HashSet<String>>, attributes_to_highlight: Option<HashSet<String>>, filters: Option<String>, timeout: Duration, matches: bool, } impl<'a> SearchBuilder<'a> { pub fn offset(&mut self, value: usize) -> &SearchBuilder { self.offset = value; self } pub fn limit(&mut self, value: usize) -> &SearchBuilder { self.limit = value; self } pub fn attributes_to_crop(&mut self, value: HashMap<String, usize>) -> &SearchBuilder { self.attributes_to_crop = Some(value); self } pub fn attributes_to_retrieve(&mut self, value: HashSet<String>) -> &SearchBuilder { self.attributes_to_retrieve = Some(value); self } pub fn add_retrievable_field(&mut self, value: String) -> &SearchBuilder { let attributes_to_retrieve = self.attributes_to_retrieve.get_or_insert(HashSet::new()); attributes_to_retrieve.insert(value); self } pub fn attributes_to_search_in(&mut self, value: HashSet<String>) -> &SearchBuilder { self.attributes_to_search_in = Some(value); self } pub fn add_attribute_to_search_in(&mut self, value: String) -> &SearchBuilder { let attributes_to_search_in = self.attributes_to_search_in.get_or_insert(HashSet::new()); attributes_to_search_in.insert(value); self } pub fn attributes_to_highlight(&mut self, value: HashSet<String>) -> &SearchBuilder { self.attributes_to_highlight = Some(value); self } pub fn filters(&mut self, value: String) -> &SearchBuilder { self.filters = Some(value); self } pub fn timeout(&mut self, value: Duration) -> &SearchBuilder { self.timeout = value; self } pub fn get_matches(&mut self) -> &SearchBuilder { self.matches = true; self } pub 
fn search(&self, reader: &heed::RoTxn) -> Result<SearchResult, Error> { let schema = self.index.main.schema(reader); let schema = schema.map_err(|e| Error::Internal(e.to_string()))?; let schema = match schema { Some(schema) => schema, None => return Err(Error::Internal(String::from("missing schema"))), }; let ranked_map = self.index.main.ranked_map(reader); let ranked_map = ranked_map.map_err(|e| Error::Internal(e.to_string()))?; let ranked_map = ranked_map.unwrap_or_default(); let start = Instant::now(); // Change criteria let mut query_builder = match self.get_criteria(reader, &ranked_map, &schema)? { Some(criteria) => self.index.query_builder_with_criteria(criteria), None => self.index.query_builder(), }; // Filter searchable fields if let Some(fields) = &self.attributes_to_search_in { for attribute in fields.iter().filter_map(|f| schema.attribute(f)) { query_builder.add_searchable_attribute(attribute.0); } } if let Some(filters) = &self.filters { let mut split = filters.split(':'); match (split.next(), split.next()) { (Some(_), None) | (Some(_), Some("")) => return Err(Error::MissingFilterValue), (Some(attr), Some(value)) => { let ref_reader = reader; let ref_index = &self.index; let value = value.trim().to_lowercase(); let attr = match schema.attribute(attr) { Some(attr) => attr, None => return Err(Error::UnknownFilteredAttribute), }; query_builder.with_filter(move |id| { let attr = attr; let index = ref_index; let reader = ref_reader; match index.document_attribute::<Value>(reader, id, attr) { Ok(Some(Value::String(s))) => s.to_lowercase() == value, Ok(Some(Value::Bool(b))) => { (value == "true" && b) || (value == "false" && !b) } Ok(Some(Value::Array(a))) => { a.into_iter().any(|s| s.as_str() == Some(&value)) } _ => false, } }); } (_, _) => (), } } query_builder.with_fetch_timeout(self.timeout); let docs = query_builder.query(reader, &self.query, self.offset..(self.offset + self.limit)); let mut hits = Vec::with_capacity(self.limit); for doc in 
docs.map_err(|e| Error::SearchDocuments(e.to_string()))? { // retrieve the content of document in kv store let mut fields: Option<HashSet<&str>> = None; if let Some(attributes_to_retrieve) = &self.attributes_to_retrieve { let mut set = HashSet::new(); for field in attributes_to_retrieve { set.insert(field.as_str()); } fields = Some(set); } let mut document: IndexMap<String, Value> = self .index .document(reader, fields.as_ref(), doc.id) .map_err(|e| Error::RetrieveDocument(doc.id.0, e.to_string()))? .ok_or(Error::DocumentNotFound(doc.id.0))?; let mut matches = doc.highlights.clone(); // Crops fields if needed if let Some(fields) = self.attributes_to_crop.clone() { for (field, length) in fields { let _ = crop_document(&mut document, &mut matches, &schema, &field, length); } } // Transform to readable matches let matches = calculate_matches(matches, self.attributes_to_retrieve.clone(), &schema); if !self.matches { if let Some(attributes_to_highlight) = self.attributes_to_highlight.clone() { let highlights = calculate_highlights( document.clone(), matches.clone(), attributes_to_highlight, ); for (key, value) in highlights { if let Some(content) = document.get_mut(&key) { *content = value; } } } } let matches_info = if self.matches { Some(matches) } else { None }; let hit = SearchHit { hit: document, matches_info, }; hits.push(hit); } let time_ms = start.elapsed().as_millis() as usize; let results = SearchResult { hits, offset: self.offset, limit: self.limit, processing_time_ms: time_ms, query: self.query.to_string(), }; Ok(results) } pub fn get_criteria( &self, reader: &heed::RoTxn, ranked_map: &'a RankedMap, schema: &Schema, ) -> Result<Option<Criteria<'a>>, Error> { let current_settings = match self.index.main.customs(reader).unwrap() { Some(bytes) => bincode::deserialize(bytes).unwrap(), None => SettingBody::default(), }; let ranking_rules = &current_settings.ranking_rules; let ranking_order = &current_settings.ranking_order; if let Some(ranking_rules) = 
ranking_rules {
            // Reserve room for the six built-in criteria + DocumentId plus
            // one slot per custom ranking rule.
            let mut builder = CriteriaBuilder::with_capacity(7 + ranking_rules.len());
            if let Some(ranking_rules_order) = ranking_order {
                // An explicit order was configured: push criteria exactly in
                // that order, resolving unknown names against the custom
                // ranking-rule map (silently skipping names found in neither).
                for rule in ranking_rules_order {
                    match rule.as_str() {
                        "_sum_of_typos" => builder.push(SumOfTypos),
                        "_number_of_words" => builder.push(NumberOfWords),
                        "_word_proximity" => builder.push(WordsProximity),
                        "_sum_of_words_attribute" => builder.push(SumOfWordsAttribute),
                        "_sum_of_words_position" => builder.push(SumOfWordsPosition),
                        "_exact" => builder.push(Exact),
                        _ => {
                            let order = match ranking_rules.get(rule.as_str()) {
                                Some(o) => o,
                                None => continue,
                            };
                            // NOTE(review): unwrap() panics if the attribute is
                            // missing from the schema/ranked map — TODO confirm
                            // settings are validated upstream.
                            let custom_ranking = match order {
                                RankingOrdering::Asc => {
                                    SortByAttr::lower_is_better(&ranked_map, &schema, &rule)
                                        .unwrap()
                                }
                                RankingOrdering::Dsc => {
                                    SortByAttr::higher_is_better(&ranked_map, &schema, &rule)
                                        .unwrap()
                                }
                            };
                            builder.push(custom_ranking);
                        }
                    }
                }
                builder.push(DocumentId);
                return Ok(Some(builder.build()));
            } else {
                // No explicit order: use the default built-in sequence, then
                // append every custom rule.
                builder.push(SumOfTypos);
                builder.push(NumberOfWords);
                builder.push(WordsProximity);
                builder.push(SumOfWordsAttribute);
                builder.push(SumOfWordsPosition);
                builder.push(Exact);
                for (rule, order) in ranking_rules.iter() {
                    let custom_ranking = match order {
                        RankingOrdering::Asc => {
                            SortByAttr::lower_is_better(&ranked_map, &schema, &rule).unwrap()
                        }
                        RankingOrdering::Dsc => {
                            SortByAttr::higher_is_better(&ranked_map, &schema, &rule).unwrap()
                        }
                    };
                    builder.push(custom_ranking);
                }
                builder.push(DocumentId);
                return Ok(Some(builder.build()));
            }
        }
        Ok(None)
    }
}

/// Character position of a query match inside a document attribute.
///
/// Ordering is lexicographic on (start, length); the previous hand-written
/// `impl Ord` implemented exactly this, so deriving `Ord` alongside
/// `PartialOrd` is behavior-identical and removes the
/// derive(PartialOrd)-with-manual-Ord inconsistency flagged by clippy.
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct MatchPosition {
    pub start: usize,
    pub length: usize,
}

pub type HighlightInfos = HashMap<String, Value>;
pub type MatchesInfos = HashMap<String, Vec<MatchPosition>>;
// pub type RankingInfos = HashMap<String, u64>;
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SearchHit { #[serde(flatten)] pub hit: IndexMap<String, Value>, #[serde(rename = "_matchesInfo", skip_serializing_if = "Option::is_none")] pub matches_info: Option<MatchesInfos>, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SearchResult { pub hits: Vec<SearchHit>, pub offset: usize, pub limit: usize, pub processing_time_ms: usize, pub query: String, // pub parsed_query: String, // pub params: Option<String>, } fn crop_text( text: &str, matches: impl IntoIterator<Item = Highlight>, context: usize, ) -> (String, Vec<Highlight>) { let mut matches = matches.into_iter().peekable(); let char_index = matches.peek().map(|m| m.char_index as usize).unwrap_or(0); let start = char_index.saturating_sub(context); let text = text.chars().skip(start).take(context * 2).collect(); let matches = matches .take_while(|m| (m.char_index as usize) + (m.char_length as usize) <= start + (context * 2)) .map(|match_| Highlight { char_index: match_.char_index - start as u16, ..match_ }) .collect(); (text, matches) } fn crop_document( document: &mut IndexMap<String, Value>, matches: &mut Vec<Highlight>, schema: &Schema, field: &str, length: usize, ) -> Result<(), Error> { matches.sort_unstable_by_key(|m| (m.char_index, m.char_length)); let attribute = schema .attribute(field) .ok_or(Error::AttributeNotFoundOnSchema(field.to_string()))?; let selected_matches = matches .iter() .filter(|m| SchemaAttr::new(m.attribute) == attribute) .cloned(); let original_text = match document.get(field) { Some(Value::String(text)) => text, Some(_) => return Err(Error::CropFieldWrongType(field.to_string())), None => return Err(Error::AttributeNotFoundOnDocument(field.to_string())), }; let (cropped_text, cropped_matches) = crop_text(&original_text, selected_matches, length); document.insert( field.to_string(), serde_json::value::Value::String(cropped_text), ); matches.retain(|m| SchemaAttr::new(m.attribute) 
!= attribute); matches.extend_from_slice(&cropped_matches); Ok(()) } fn calculate_matches( matches: Vec<Highlight>, attributes_to_retrieve: Option<HashSet<String>>, schema: &Schema, ) -> MatchesInfos { let mut matches_result: HashMap<String, Vec<MatchPosition>> = HashMap::new(); for m in matches.iter() { let attribute = schema .attribute_name(SchemaAttr::new(m.attribute)) .to_string(); if let Some(attributes_to_retrieve) = attributes_to_retrieve.clone() { if !attributes_to_retrieve.contains(attribute.as_str()) { continue; } }; if let Some(pos) = matches_result.get_mut(&attribute) { pos.push(MatchPosition { start: m.char_index as usize, length: m.char_length as usize, }); } else { let mut positions = Vec::new(); positions.push(MatchPosition { start: m.char_index as usize, length: m.char_length as usize, }); matches_result.insert(attribute, positions); } } for (_, val) in matches_result.iter_mut() { val.sort_unstable(); val.dedup(); } matches_result } fn calculate_highlights( document: IndexMap<String, Value>, matches: MatchesInfos, attributes_to_highlight: HashSet<String>, ) -> HighlightInfos { let mut highlight_result: HashMap<String, Value> = HashMap::new(); for (attribute, matches) in matches.iter() { if attributes_to_highlight.contains("*") || attributes_to_highlight.contains(attribute) { if let Some(Value::String(value)) = document.get(attribute) { let value: Vec<_> = value.chars().collect(); let mut highlighted_value = String::new(); let mut index = 0; for m in matches { if m.start >= index { let before = value.get(index..m.start); let highlighted = value.get(m.start..(m.start + m.length)); if let (Some(before), Some(highlighted)) = (before, highlighted) { highlighted_value.extend(before); highlighted_value.push_str("<em>"); highlighted_value.extend(highlighted); highlighted_value.push_str("</em>"); index = m.start + m.length; } else { error!("value: {:?}; index: {:?}, match: {:?}", value, index, m); } } } highlighted_value.extend(value[index..].iter()); 
highlight_result.insert(attribute.to_string(), Value::String(highlighted_value)); }; } } highlight_result } #[cfg(test)] mod tests { use super::*; #[test] fn calculate_highlights() { let data = r#"{ "title": "Fondation (<NAME>)", "description": "En ce début de trentième millénaire, l'Empire n'a jamais été aussi puissant, aussi étendu à travers toute la galaxie. C'est dans sa capitale, Trantor, que l'éminent savant <NAME> invente la psychohistoire, une science toute nouvelle, à base de psychologie et de mathématiques, qui lui permet de prédire l'avenir... C'est-à-dire l'effondrement de l'Empire d'ici cinq siècles et au-delà, trente mille années de chaos et de ténèbres. Pour empêcher cette catastrophe et sauver la civilisation, Seldon crée la Fondation." }"#; let document: IndexMap<String, Value> = serde_json::from_str(data).unwrap(); let mut attributes_to_highlight = HashSet::new(); attributes_to_highlight.insert("*".to_string()); let mut matches: HashMap<String, Vec<MatchPosition>> = HashMap::new(); let mut m = Vec::new(); m.push(MatchPosition { start: 0, length: 9, }); matches.insert("title".to_string(), m); let mut m = Vec::new(); m.push(MatchPosition { start: 510, length: 9, }); matches.insert("description".to_string(), m); let result = super::calculate_highlights(document, matches, attributes_to_highlight); let mut result_expected = HashMap::new(); result_expected.insert( "title".to_string(), Value::String("<em>Fondation</em> (<NAME>)".to_string()), ); result_expected.insert("description".to_string(), Value::String("En ce début de trentième millénaire, l'Empire n'a jamais été aussi puissant, aussi étendu à travers toute la galaxie. C'est dans sa capitale, Trantor, que l'éminent savant <NAME> invente la psychohistoire, une science toute nouvelle, à base de psychologie et de mathématiques, qui lui permet de prédire l'avenir... C'est-à-dire l'effondrement de l'Empire d'ici cinq siècles et au-delà, trente mille années de chaos et de ténèbres. 
Pour empêcher cette catastrophe et sauver la civilisation, Seldon crée la <em>Fondation</em>.".to_string())); assert_eq!(result, result_expected); } }
Performance comparison between multiple-quantum-well modulator-based and vertical-cavity-surface-emitting laser-based smart pixels. We compared multiple-quantum-well modulator-based smart pixels and vertical-cavity-surface-emitting laser (VCSEL) based smart pixels in terms of optical switching power, switching speed, and electric-power consumption. Optoelectronic circuits integrating GaAs field-effect transistors are designed for smart pixels of both types under the condition that each pixel has an optical threshold and gain. It is shown that both types perform maximum throughput of ~3 Tbps/cm. In regard to design flexibility, the modulator type is advantageous because switching time can be reduced by supplying large electric power, whereas switching time and electric-power consumption are limited to larger than certain values in the VCSEL type. In contrast, in regard to optical implementation, the VCSEL type is advantageous because it does not need an external bias-light source, whereas the modulator type needs bias-light arrays that must be precisely located because the small modulator diameter, <10 μm, is essential to high-speed operation. A bias-light source that increases the total power consumption of the system may offset the advantages of the modulator type.
Featuring clothing and textile collections online Purpose The purpose of this research is to analyse the display of digital images found on clothing and textile collection websites. Design/methodology/approach Features noted included where on the website the images were found, such as in a display or as part of a database. Display features are documented, including enlargement abilities, the view of the artefact, the use of dress forms and mannequins, and the context in which the artefact was pictured. The text that describes the artefact is also documented. The instrument was a content analysis of clothing and textile collection websites. Data were collected in 2006 from 57 clothing and textile collection websites. Findings All 57 costume and textile museums had images of collection artefacts online, with the majority sharing a featured artefact. Almost half of the websites used images in databases and displays. Enlargement abilities were not common; most of the visuals showed the front view of the artefact. Enlargements were more common in displays. Mannequins and dress forms were used infrequently. Detailed text to explain the artefacts was available in the databases. Research limitations/implications The research was limited to observing 57 websites. Originality/value Common features used by costume and textile museums when displaying collection pieces online were identified. Suggestions as to what content to include in a website for clothing and textile collections are discussed in light of the data collected.
// fetchUtxos loads the unspent transaction outputs for the provided set of // outputs into the view from the database as needed unless they already exist // in the view in which case they are ignored. func fetchUtxos(view *txo.UtxoViewpoint, db database.Transactor, outpoints map[protos.OutPoint]struct{}) error { if len(outpoints) == 0 { return nil } neededSet := make(map[protos.OutPoint]struct{}) for outpoint := range outpoints { if view.Exist(outpoint) { continue } neededSet[outpoint] = struct{}{} } return FetchUtxosMain(view, db, neededSet) }
Aqueous basic solutions: hydroxide solvation, structural diffusion, and comparison to the hydrated proton. Many hydrogen-bonded liquids, molecular solids, and low-dimensional systems support anomalous diffusion mechanisms of topological charge defects created by the addition or removal of protons. The most familiar examples are the classic cases of aqueous acidic and basic solutions,1 where the defects appear in the form of hydrated hydronium (H3O+) and hydroxide (OH-) ions, denoted as H+(aq) and OH-(aq), respectively.2 While anomalous charge migration has important consequences in chemical,1,3,4 biological,5-8 and technological9,10 applications, Vide infra, providing a molecular-level, mechanistic understanding of the fascinating physical principles underlying the charge transport process is a challenging, yet fundamental, problem in physical chemistry.11
<reponame>hypha/zoopla from __future__ import print_function __author__ = 'raquel' import collections import pandas as pd from api_factory import api as API from geo_info import GeoInfo from map import Map def zoopla_list(area, listing_status, minimum_beds, maximum_price): listings = [] api = API(version=1, api_key='k4uew92e27kzs7nbrk93uguh') for listing in api.property_listings(area=str(area), listing_status=listing_status, max_results=None, minimum_beds=minimum_beds, maximum_price=maximum_price): listings.append(listing) return listings # get all of the listings, sort each by keys def sort_listing_dic(area, listing_status, minimum_beds, maximum_price): listings = zoopla_list(area, listing_status, minimum_beds, maximum_price) for listing in listings: if "new_home" not in listing.__dict__: listing.__dict__.update({"new_home": None}) collections.OrderedDict(sorted(listing.__dict__.items())) return listings def property_keys(properties): keys = properties[0].__dict__.keys() return sorted(keys) def property_df(properties): df = pd.DataFrame(p.__dict__ for p in properties) return df def property_location(properties): geo_listings = [] for listing in properties: geo_listings.append(GeoInfo(listing)) return geo_listings # properties = sort_listing_dic("edinburgh", "sale", "2", "350000") # # locations_info = property_location(properties) # # postcodes = [x.postcode for x in locations_info] # formatted_addresses = [x.address() for x in locations_info] # # prop_df = property_df(properties) # locations = [x.loc_info for x in locations_info] # # prop_df["Postcode"] = postcodes # prop_df["formatted_address"] = formatted_addresses # # dep_df = pd.ExcelFile("./Deprivation_Index_2016.xls") # # dep_full = dep_df.parse("All postcodes") # # zoopla_dep = prop_df.merge(dep_full, on=["Postcode"]) # # df3 = zoopla_dep[["formatted_address", "details_url", "latitude", "longitude"]][zoopla_dep["SIMD16_Vigintile"] > 17] # # map = Map() # # for i in range(len(df3)): # 
map.add_point((df3.iloc[i].latitude, df3.iloc[i].longitude)) # # # with open("output1.html", "w") as out: # print(map, file=out) def makemap(): ## for london properties = sort_listing_dic("sutton, london", "sale", "2", "350000") locations_info = property_location(properties) postcodes = [x.postcode for x in locations_info] formatted_addresses = [x.address() for x in locations_info] prop_df = property_df(properties) locations = [x.loc_info for x in locations_info] prop_df["Postcode"] = postcodes prop_df["formatted_address"] = formatted_addresses dep_df = pd.ExcelFile("./sutton-deprivation-data.xlsx") dep_full = dep_df.parse("Sheet1") zoopla_dep = prop_df.merge(dep_full, on=["Postcode"]) df3 = zoopla_dep[["formatted_address", "details_url", "latitude", "longitude"]][zoopla_dep["Index of Multiple Deprivation Decile"] >= 8] map = Map() for i in range(len(df3)): map.add_point((df3.iloc[i].latitude, df3.iloc[i].longitude)) return map with open("output1.html", "w") as out: print(map, file=out)
The village fund utilization and its implication for public health improvement in the pandemic era Introduction: The allocation of village funds to the health sector focused on reducing nutritional problems and involving the village in controlling non-natural disasters caused by COVID-19. The study aimed to analyze the implication of the village fund toward public health performance in the pandemic era. Methods: Ecological analysis was conducted using secondary data from the East Java Health Profile 2020. The sample was 29 districts of East Java, where all the villages receive village funds. Apart from village fund utilization as the dependent variable, the independent variable consists of nutritional status, active Posyandu, case of COVID-19 and recovery rate of COVID-19. Data were analyzed by univariate and bivariate. The bivariate analysis was performed using the chi-squared test. Results: Undernourished and wasting were silent health burdens in the pandemic. The pandemic also made the active Posyandu is lower. High attention to Kediri and Jombang was grouped to vulnerable health areas. Statistically, there was no correlation between the village fund utilization and public health improvement in the pandemic era. Conclusion: Although there is no statistical correlation, nutrition issues and the impact of using village funds on health must be a concern. An in-depth evaluation needs to be carried out on the use of village funds to get clear implications for public health
The analysis is presented of the results of epidemiological studies of 180 subjects of either sex aged 35-44 years from the standpoint of their needs for crowns and bridges. The proportion of subjects in need of prosthetic crowns was 30.55%, while 18.33% required bridges. The material was subjected to statistical analysis.
<gh_stars>0
#pragma once

#include <string>

#include <ZEngineDef.h>
#include <Event/EventType.h>
#include <Event/EventCategory.h>

// Expands to a static accessor returning the compile-time EventType of a
// concrete event class; used by subclasses to implement GetType() dispatch.
#define EVENT_TYPE(x)                                \
    static ZEngine::Event::EventType GetStaticType() { \
        return ZEngine::Event::EventType::x;           \
    }

// Expands to a static accessor returning the compile-time category bitmask
// of a concrete event class.
#define EVENT_CATEGORY(x)          \
    static int GetStaticCategory() { \
        return ZEngine::Event::EventCategory::x; \
    }

namespace ZEngine::Event {

    /// Abstract base class for all engine events.
    ///
    /// Concrete events implement GetType()/GetCategory()/ToString() (usually
    /// via the EVENT_TYPE / EVENT_CATEGORY macros above) and carry a
    /// "handled" flag that event dispatchers use to stop propagation.
    class CoreEvent {
    public:
        CoreEvent()          = default;
        virtual ~CoreEvent() = default;

        /// Marks the event as consumed (true) or still propagating (false).
        void SetHandled(bool value) { m_handled = value; }

        /// @return true if a handler has already consumed this event.
        bool IsHandled() const { return m_handled; }

        /// @return the human-readable event name.
        const std::string& GetName() const { return m_name; }

        /// Sets the human-readable event name (copied into m_name).
        void SetName(const char* value) { m_name = std::string(value); }

        /// @return the dynamic type of this event instance.
        virtual EventType GetType() const = 0;

        /// @return the category bitmask of this event instance.
        virtual int GetCategory() const = 0;

        /// @return a debug string describing the event.
        virtual std::string ToString() const = 0;

    protected:
        bool        m_handled{false};  // true once a handler consumed the event
        std::string m_name{};          // human-readable name for logging/debugging
    };
} // namespace ZEngine::Event
// Copyright (c) 2016 <NAME> #include "State.h" State::State(const Name& name, Population population) :m_name{name}, m_population{population} { } Name State::name() const { return m_name; } Population State::population() const { return m_population; }
<reponame>vandetho/react-native-fullscreen-gallery<filename>src/Thumbnail/index.tsx
// Barrel module: re-exports the three thumbnail gallery variants so callers
// can import them from 'Thumbnail' directly instead of the individual files.
export { default as IndicatorGallery } from './IndicatorGallery';
export { default as PreviewGallery } from './PreviewGallery';
export { default as ThumbnailGallery } from './ThumbnailGallery';
package com.jeeplus.weixin.fastweixin.api;

import com.jeeplus.weixin.fastweixin.api.config.ApiConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * WeChat semantic-understanding ("smart") API.
 * (Class comment translated from Chinese.)
 *
 * @author yuzp17311
 * @version v1.0
 * @date 2017-02-27 17:55
 */
public class SemanticAPI extends BaseAPI {

    // NOTE(review): currently unused in the visible code; presumably kept
    // for future request logging — confirm before removing.
    private static final Logger logger = LoggerFactory.getLogger(SemanticAPI.class);

    /**
     * Constructor; passes the apiConfig to the base API class.
     * (Translated from Chinese.)
     *
     * @param config WeChat API configuration object
     */
    protected SemanticAPI(ApiConfig config) {
        super(config);
    }
}
"""Blueprint wiring for the application's `main` module.

Creates the blueprint, registers REST resources, and starts the
background scheduler used for gift-card processing.
"""
from flask import Blueprint

main = Blueprint('main', __name__)

# Imported for side effects: these modules attach routes / error handlers
# to the `main` blueprint above (hence the import after its creation).
from . import views, errors

from flask_restful import Api
from .api.gfcard import GiftCard

api = Api(main)
api.add_resource(GiftCard, '/giftcard/request')

from apscheduler.schedulers.background import BackgroundScheduler
from .jobs.giftCardSchedule import GiftCardSchedule

# Run the gift-card job every 3 seconds on a background thread.
# Fixed the misspelled local name `giftCArdSchedule`; the old name is kept
# as an alias in case other modules import it.
gift_card_schedule = GiftCardSchedule()
giftCArdSchedule = gift_card_schedule  # backward-compat alias (old misspelling)
scheduler = BackgroundScheduler()
scheduler.add_job(gift_card_schedule.test, 'interval', seconds=3)
scheduler.start()
Recent Advances in Potential Clinical Application of Ghrelin in Obesity Ghrelin is the natural ligand of the growth hormone secretagogue receptor (GHS-R1a). Ghrelin is a 28 amino acid peptide possessing a unique acylation on the serine in position 3 catalyzed by ghrelin O-acyltransferase (GOAT). Ghrelin stimulates growth hormone secretion, but also appetite, food intake, weight gain, and gastric emptying. Ghrelin is involved in weight regulation, obesity, type 2 diabetes, and metabolic syndrome. Furthermore, a better understanding of ghrelin biology led to the identification of molecular targets modulating ghrelin levels and/or its biological effects: GOAT, ghrelin, and GHS-R1a. Furthermore, a recent discovery, showing the involvement of bitter taste receptor T2R in ghrelin secretion and/or synthesis and food intake, suggested that T2R could represent an additional interesting molecular target. Several classes of ghrelin-related pharmacological tools for the treatment of obesity have been or could be developed to modulate the identified molecular targets. Introduction Ghrelin, the natural ligand of the growth hormone secretagogue receptor (GHS-R1a), is a potent stimulator of growth hormone secretion. Moreover, ghrelin is also an appetite-stimulating hormone inducing food intake and weight gain in human, and promoting gastric emptying. Ghrelin is a 28 amino acid peptide predominantly produced and secreted into the blood stream by the endocrine stomach mucosal cells named "X/A like" in rat and P/D1 cells in humans. Ghrelin has the particularity to be acylated on the serine in position 3. During the processing of preproghrelin, both ghrelin 1-28 and ghrelin 1-27 can result and then are subjected to the acylation of the hydroxyl group of Ser3. Acylation, a unique peptidic modification, is catalyzed by ghrelin O-acyltransferase, a member of the membrane-bound O-acyltransferase family, during the processing of the peptide. 
Acylation occurs most frequently with an octanoyl group (C8:0), and more rarely with a decanoyl (C10:0) or a decenoyl (C10:1) group. Acylation of ghrelin can be increased by ingestion of either medium-chain fatty acids or medium-chain triacylglycerides. Des-acyl ghrelin represents more than 90% of human plasma ghrelin immunoreactivity. It remains presently uncertain if both ghrelin and des-acyl ghrelin, present in the stomach, are both secreted into the bloodstream via similar or different regulated pathway(s). In the rat stomach, ghrelin is deacylated by lysophospholipase I, and degraded by N-terminal proteolysis. The shorter half-life of ghrelin compared to des-acyl ghrelin and plasma ghrelin deacylation could account for the vast predominance of des-acyl ghrelin in the circulation. Human butyrylcholinesterase and other esterase(s), such as platelet-activating factor acetylhydrolase, and rat carboxylesterase are responsible for ghrelin desoctanoylation in these species. Interestingly, butyrylcholinesterase knockout mice fed with a normal standard 5% fat diet had normal body weight while mice fed with high-fat diet (11% fat) became obese. Butyrylcholinesterase was suggested to play a role in fat catabolism as the obese phenotype could not be explained by increased ghrelin, caloric intake, or decreased exercise. The suggested participation of human paraoxonase in ghrelin deacylation remains controversial. Due to ghrelin degradation by serum, it is difficult to accurately determine the ghrelin level and consequently its physiological and pathophysiological roles. In the circulation, des-acyl ghrelin is mostly present as a free peptide while the vast majority of acyl ghrelin is bound to larger molecules and in particular to lipoproteins. The presence of the acyl group is necessary for ghrelin interaction with triglyceride-rich lipoproteins and low-density lipoprotein but not high-density lipoproteins and very high-density lipoproteins. 
Besides, ghrelin interacts via its N- and C-terminal parts with high-density lipoproteins and very high-density lipoproteins. These data support the transport of acylated ghrelin by triglyceride-rich lipoproteins and of both ghrelin and des-acyl ghrelin by high-density lipoproteins and very high-density lipoproteins. Modifications to lipoprotein levels in response to obesity may affect ghrelin transport as well as free ghrelin levels. Administration of ghrelin to rats leads to stimulation of food intake and decrease of energy expenditure, accounting for body weight increase. Intravenous ghrelin administration in humans also increases appetite and stimulates food intake. Plasma ghrelin levels are negatively correlated with BMI and fluctuate in a compensatory manner to body weight variations. Indeed, plasma ghrelin level is increased in anorexia nervosa and cachexia, and decreased in obesity. Ghrelin levels decrease with weight gain resulting from overfeeding, pregnancy, olanzapine treatment, or high fat diet. Central ghrelin administration to rats submitted to high fat diet does not result in greater food intake, while increased adiposity in white adipose tissue occurs. In white adipose tissue, ghrelin stimulates the gene expression of lipogenic enzymes such as stearoyl CoA desaturase, acetyl CoA carboxylase, and fatty acid synthase. These data suggest that central ghrelin simultaneously regulates food intake and adipose tissue metabolism through distinct mechanisms. The acute feeding response appears to be mediated by GHS-R1a. The chronic weight gain effect of ghrelin may be modulated by both GHS-R1a as well as an as yet unidentified receptor for ghrelin, as both ghrelin and a ghrelin antagonist induced body weight gain. Nevertheless, further studies would be required to clarify this issue. Des-acyl ghrelin has been recently taken into consideration as a modulator of food intake that could act through an as yet unidentified receptor. 
However, des-acyl ghrelin appears to have controversial effects on food intake. Indeed, GOAT knockout mice displayed reduced fat mass despite increased des-acyl ghrelin levels. The identification of speculated des-acyl ghrelin receptors could deeply increase our knowledge on the mechanisms and actions sites of this peptide. High plasma ghrelin levels have been reported in patients with Prader-Willi syndrome (PWS), a genetic disorder characterized by mental retardation and hyperphagia leading to severe obesity. In this disorder, ghrelin may be responsible, at least partially, for the insatiable appetite and the obesity of the patients. From the molecular biological point of view, it is interesting to note that both ghrelin and its receptor (GHSR) genes are located on chromosome 3 in regions that have been linked to obesity. Polymorphisms of both ghrelin and its receptor GHSR1a have been studied in obesity. However, further studies are required to assess unambiguously the functional significance of these mutations in the pathogenesis of obesity. Due to the observed association between plasma ghrelin levels and insulin levels as well as insulin resistance, it was suggested that inhibition of ghrelin secretion and/or of GHS-R1a could be a useful treatment and/or prevention for type 2 diabetes. In this respect, data from numerous studies evaluating the therapeutic implications of ghrelin on glucose-insulin homeostasis have been recently reviewed. The involvement of ghrelin in obesity led to the development of several ghrelin-related pharmacological tools for the treatment of obesity. The present review focuses on the recent advances made in potential clinical applications of ghrelin in obesity. Ghrelin O -Acyltransferase: A Pharmacological Target to Decrease Acylated Ghrelin Levels Ghrelin O-acyltransferase (GOAT), identified as being an orphan membrane-bound O-acyltransferase (MBOAT), catalyzes the addition of an octanoyl group on the serine in position 3 (Figure 1). 
Ghrelin octanoylation is essential for its recognition by GHS-R1a. Two distinct scientific approaches led to the identification of GOAT. The first approach was based on gene-silencing experiments targeting a candidate gene encoding an uncharacterized protein containing structural motifs reminiscent of the MBOAT acyltransferase family in the human medullary thyroid carcinoma cells expressing octanoylated ghrelin. The authenticity of the human gene identified was verified using RT-PCR and 5 RACE reactions, and the predicted protein encoded by the gene was named GOAT. The functional activity of GOAT, the octanoylation of ghrelin, was demonstrated by transient transfections of the GOAT cDNA. GOAT was also shown to acylate ghrelin with fatty acid ranging from C7 to C12. Furthermore, additional experiments revealed the importance of the conserved MBOAT-histidine residue in position 338 of GOAT to its acylation activity. The second approach was based on the transfection of rat insulinoma Ins-1 cells with ghrelin cDNA and subsequently of one at a time, of the sixteen MBOAT cDNAs. Only one MBOAT cDNA led to acylated ghrelin production. Furthermore, mutation of either serine in position 3 of ghrelin or of the conserved MBOAT-histidine residue in position 338 of GOAT abolished ghrelin acylation. GOAT exhibits some specificity for medium chain fatty acids like octanoate and proceeds to ghrelin octanoylation before its translocation to the Golgi where it is cleaved by prohormone convertase 1/3 to form mature ghrelin. This suggested that GOAT is located in the endoplasmic reticulum. GOAT is a highly hydrophobic protein with eight postulated membrane-spanning helices presenting a high degree of sequence conservation across vertebrates. GOAT is coexpressed with acyl ghrelin in ghrelin-expressing tissues. GOAT displays a preference for hexanoyl-CoA over octanoyl-CoA as an acyl donor. 
However, the precise mechanism leading to the entry of acyls-CoA into the endoplasmic reticulum lumen remains unknown. One hypothesis is that GOAT could possibly bind acyl-CoA and, due to its hydrophobic properties, allow the acylation of ghrelin in the endoplasmic reticulum lumen. An in vitro biochemical assay for GOAT activity revealed the importance of proper recognition of several amino acids in proghrelin (glycine-1, serine-3, and phenylalanine-4) for GOAT activity. Fasting and satiation could modulate the activity of GOAT as ghrelin levels rise before meals and decrease with food intake. Moreover, long-term fasting inhibits ghrelin acylation but not total ghrelin secretion whereas feeding suppresses both acyl and des-acyl ghrelin. However, the effect of fasting and feeding on GOAT mRNA levels remain unclear. Experimental evidences showed that GOAT is a leptin-regulated gene. Increased GOAT mRNA levels in response to long-term chronic malnutrition could represent the underlying mechanism responsible for increased acylated ghrelin levels in anorexia nervosa. Dietary lipids are critical for the activation of GOAT, and consequently ghrelin acylation. Indeed, GOAT knock-out mice submitted to a diet containing 10% medium-chain triglyceride exhibited lower body weight that can be explained by lower fat mass compared to wild-type mice. In addition, GOAT transgenic mice only fed with a mediumchain triglycerides supplementation produced large amounts of acyl ghrelin. An essential function of ghrelin could be the maintenance of viability during periods of famine. This hypothesis is supported by the data showing that wild-type and GOAT knock-out mice submitted to 60% calorie-restricted diet displayed 30% and 75% body weight loss, respectively. Much work remains to be done to fully understand how GOAT fits into the control of energy homeostasis. 
However, measurement of both GOAT protein levels and GOAT activity will be crucial to determine its gene expression and functional regulation. Indeed, GOAT knock-out mice represent a valuable tool to determine the physiological consequences of a specific deficiency in acylated ghrelin. Recently, genetic variation of GOAT was suggested to be involved in the etiology of anorexia nervosa. It would be interesting to determine if genetic variation of GOAT might also be linked to obesity. If this proves to be the case, personalized medicine targeting GOAT could be envisioned as a novel therapeutic approach for the treatment of obesity. Pharmacological tools have been developed to target the inhibition of GOAT (Figure 1). Indeed, a pentapeptide, corresponding to the first five N-terminal amino acids of ghrelin with its C-terminal end amidated competitively inhibited GOAT activity through an end-product inhibition mechanism. The inhibition of GOAT is better achieved when pentapeptides contain an octanoyl group linked to serine-3 by an amide linkage. Moreover, GOAT was also inhibited by peptide-based bisubstrate analog, GO-CoA-Tat, in cultured cells, as well as in mice. The design of this bisubstrate analog was based on the theory that GOAT could use a ternary complex mechanism to proceed to the linkage of octanoyl-CoA to ghrelin. The intraperitoneal administration led to reduced weight gain and improved glucose tolerance in wild-type mice but not in ghrelin knock-out mice. Even though GO-CoA-Tat presents some limitations as a peptide-based drug, it is likely that future synthetic derivatizations will maximize its pharmacological properties. In conclusion, GOAT represents an extremely promising candidate for the development of antiobesity and/or antidiabetes drugs. Indeed, it is the unique enzyme responsible for ghrelin acylation and its modulation would only affect the physiological process of ghrelin acylation. 
Neutralization of Ghrelin Vaccination against ghrelin represents a strategy to block the effects of ghrelin (Figure 1). Rats immunized with ghrelin hapten immunoconjugates led to the production of antibodies specifically directed against acylated ghrelin, and reduced body weight gain with preferential reduction of fat mass concomitant to decreased feeding efficiency. The human relevance of using vaccination against ghrelin remains uncertain. Indeed, phase I/II a trial using CYT 009-Ghr Qb vaccine, from Cytos Biotechnology AG, demonstrated no weight-loss effect in obese humans despite efficient antibody response. High-affinity antiacyl ghrelin specific monoclonal antibodies specifically bind acyl ghrelin, dose-dependently inhibits GHS-R1a activation in vitro, and block ghrelin-induced food intake in mice in vivo. Neutralization of ghrelin was also achieved using spiegelmers, antisense polyethylene glycol-modified L-oligonucleotides capable of specifically binding a target molecule (Figure 1). The spiegelmer NOX-B11-2 decreased food intake and body weight in diet-induced obese mice. Another spiegelmer, NOX-B11-3 exerted a long-lasting action on the inhibition of ghrelin-induced GH release in rats, but did not block the fasting-induced neuronal activation in the hypothalamic arcuate nucleus. The neutralization of circulating ghrelin by spiegelmers may be useful to treat diseases associated with high ghrelin levels such as PWS characterized by severe obesity. Pfizer Inc. has taken over further development of the NOX-B11 spiegelmers originally developed by NOXXON Pharma AG. In conclusion, the therapeutic usefulness of vaccination against ghrelin and the use of ghrelin spiegelmers in the treatment of obesity remain to be proven. GHS-R1a Antagonists. The inhibition of ghrelin signaling represents an attractive target for pharmacological treatment of type 2 diabetes, obesity, particularly PWS, and metabolic syndrome. 
Consequently, several classes of GHS-R1a antagonists have been developed (Figure 1). Piperidine-substituted quinazolinone derivatives were identified as a novel class of small GHS-R1a antagonists molecules. Phenyl or phenoxy groups are optimal substituents at position 6 of the quinazolinone core, and the replacement of phenyl groups in position 2 by small alkyl substituents were proven to be beneficial. YIL-781, a piperidine-substituted quinazolinone derivative acting as a potent GHS-R1a antagonist, improved glucose-stimulated insulin secretion and reduced food intake and weight loss in diet-induced obese mice. Optimization of piperazine-bisamide analogs synthesis led to potent GHS-R1a antagonists. One of these analogs featured especially high potency as well as other interesting pharmacological properties, and inhibited GH release ex vivo. Several carbohydrazide derivatives were identified as being potent and selective GHS-R1a antagonists. Among these compounds, GSK1614343 was shown to be a potent competitive antagonist of rat GHS-R1a. Unexpectedly, GSK1614343 produced an increase in food intake and body weight in both rats and dogs. In conclusions, several classes of GHS-R1a antagonists have been identified and could represent an interesting pharmacological tools for the treatment of obesity as well as type 2 diabetes and metabolic syndrome. However, longterm animal and human studies still remain necessary to appropriately evaluate the beneficial properties of ghrelin antagonists in the context of obesity. GHS-R1a Inverse Agonists. The high constitutive activity of GHS-R1a suggested that inverse GHS-R1a agonists, decreasing its constitutive activity, may be useful for the treatment of obesity. Long fasting induced, in the hypothalamus, increased GHS-R1a expression and concomitant signaling causing higher appetite and decreased energy expenditure. 
Therefore, reduction of the GHS-R1a constitutive activity by an inverse agonist could increase the sensitivity to anorexigenic hormones like leptin or PYY, and prevent food intake between meals. In conclusion, GHR-R1a inverse agonists represent interesting pharmacological tool to inhibit GHS-R1a activity ( Figure 1). However, additional studies evaluating the longterm use of the compounds in animal models are necessary to elucidate their usefulness in the treatment of obesity and related diseases in humans. New Potential Pharmacological Target to Decrease Ghrelin Secretion Very recently, gavage of bitter taste receptor (T2R) agonists was shown to increase plasma acyl ghrelin in mice through the stimulation of -gustducin, the -subunit of a trimeric G-protein complex involved in taste signal transduction. Immunofluorescence studies revealed that the stomach endocrine cells expressing ghrelin displayed up to 90-95% colocalization with -gustducin. Furthermore, gavage of T2R-agonists increased food intake in wild-type mice but not in -gustducin or GHS-R1a knock out mice. It is presently unclear if the transduction pathways induced following T2R activation could affect ghrelin acylation by GOAT and/or ghrelin release. In conclusion, T2R could represent a new interesting pharmacological target to modulate ghrelin secretion ( Figure 1). Furthermore, the potential use of T2R antagonists for the treatment of obesity remains to be evaluated. General Conclusions The involvement of ghrelin in obesity and the better understanding of ghrelin biology have led to the identification of pharmacological targets and the development of pharmacological compounds for the treatment of obesity and related diseases. So far, pharmacological compounds have been designed to target GOAT, ghrelin, and GHS-R1a. Very recently, it has been suggested that T2R could also represent an interesting target in the context of ghrelin and the treatment of obesity.
// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud.erda.analyzer.alert.sinks;

import cloud.erda.analyzer.alert.models.AlertRecord;
import cloud.erda.analyzer.common.constant.Constants;
import lombok.extern.slf4j.Slf4j;
import lombok.val;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.configuration.Configuration;
import java.beans.PropertyVetoException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * Flink sink that buffers {@link AlertRecord}s and upserts them into the
 * MySQL table {@code sp_alert_record} in batches.  A flush happens when the
 * buffer exceeds {@code batchSize} records or {@code interval} ms have
 * elapsed since the last flush.
 *
 * @author randomnil
 */
@Slf4j
public class AlertRecordSink extends DBPoolSink<AlertRecord> {

    // Upsert statement: parameters 1-13 populate the INSERT column list;
    // on a duplicate key only the mutable columns (state/type/index/name/time)
    // are refreshed (parameters 14-18).
    private static final String PREPARE_STATEMENT = "INSERT INTO `sp_alert_record`" +
            "(`group_id`, `scope`, `scope_key`, `alert_group`, `title`, `alert_state`, `alert_type`, `alert_index`, " +
            "`expression_key`, `alert_id`, `alert_name`, `rule_id`, `alert_time`) " +
            "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) " +
            "ON DUPLICATE KEY UPDATE " +
            "`alert_state` = ?, `alert_type` = ?, `alert_index` = ?, `alert_name` = ?, `alert_time` = ?";

    private String url;      // JDBC URL assembled from MYSQL_* properties
    private String user;
    private String password;

    private Connection conn;        // connection obtained via newConnection()
    private PreparedStatement ps;   // reused prepared upsert statement

    private long interval = 5000;   // max ms between flushes (default 5s)
    private long lastExecTimestamp; // wall-clock time of the last flush
    private int batchSize = 200;    // flush once the buffer exceeds this size
    private List<AlertRecord> queue; // records buffered since the last flush

    /**
     * Reads MySQL connection settings and optional batch-size/interval
     * overrides from {@code properties}.
     *
     * @param properties configuration holding Constants.MYSQL_* keys
     */
    public AlertRecordSink(Properties properties) {
        // useUnicode/UTF-8 for non-ASCII alert text; autoReconnect lets the
        // driver re-establish a dropped connection.
        this.url = String.format("jdbc:mysql://%s:%s/%s?useUnicode=true&characterEncoding=UTF-8&autoReconnect=true&failOverReadOnly=false",
                properties.getProperty(Constants.MYSQL_HOST),
                properties.getProperty(Constants.MYSQL_PORT),
                properties.getProperty(Constants.MYSQL_DATABASE));
        this.user = properties.getProperty(Constants.MYSQL_USERNAME);
        this.password = properties.getProperty(Constants.MYSQL_PASSWORD);
        val batchSize = properties.getProperty(Constants.MYSQL_BATCH_SIZE);
        if (StringUtils.isNotBlank(batchSize)) {
            this.batchSize = Integer.valueOf(batchSize);
        }
        val interval = properties.getProperty(Constants.MYSQL_INTERVAL);
        if (StringUtils.isNotBlank(interval)) {
            this.interval = Integer.valueOf(interval);
        }
        this.queue = new ArrayList<>(this.batchSize);
    }

    /** Opens the JDBC connection and prepares the upsert statement. */
    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        this.openConnection();
    }

    /** Flushes any buffered records, then closes the connection. */
    @Override
    public void close() throws Exception {
        super.close();
        this.execute();
        this.conn.close();
    }

    /**
     * Buffers one record; flushes when the buffer exceeds batchSize or the
     * flush interval has elapsed.
     */
    @Override
    public void invoke(AlertRecord value, Context context) throws Exception {
        this.queue.add(value);
        if (this.queue.size() > this.batchSize || System.currentTimeMillis() - this.lastExecTimestamp > this.interval) {
            execute();
            this.lastExecTimestamp = System.currentTimeMillis();
        }
    }

    // Initializes the pool with the configured credentials, takes one
    // connection from it, and prepares the upsert statement for reuse.
    private void openConnection() throws SQLException, PropertyVetoException {
        initConnection(this.url, this.user, this.password);
        this.conn = newConnection();
        this.ps = conn.prepareStatement(PREPARE_STATEMENT);
        this.lastExecTimestamp = System.currentTimeMillis();
    }

    // Binds every buffered record into one JDBC batch and executes it.
    private void execute() throws Exception {
        for (val value : this.queue) {
            // Parameter indices 1-13 feed the INSERT columns, 14-18 the
            // ON DUPLICATE KEY UPDATE clause (see PREPARE_STATEMENT).
            int i = 1;
            ps.setString(i++, value.getGroupId());
            ps.setString(i++, value.getScope());
            ps.setString(i++, value.getScopeKey());
            ps.setString(i++, value.getAlertGroup());
            ps.setString(i++, value.getTitle());
            ps.setString(i++, value.getAlertState());
            ps.setString(i++, value.getAlertType());
            ps.setString(i++, value.getAlertIndex());
            ps.setString(i++, value.getExpressionKey());
            ps.setLong(i++, value.getAlertId());
            ps.setString(i++, value.getAlertName());
            ps.setLong(i++, value.getRuleId());
            ps.setTimestamp(i++, new Timestamp(value.getAlertTime()));
            ps.setString(i++, value.getAlertState());
            ps.setString(i++, value.getAlertType());
            ps.setString(i++, value.getAlertIndex());
            ps.setString(i++, value.getAlertName());
            ps.setTimestamp(i++, new Timestamp(value.getAlertTime()));
            ps.addBatch();
        }
        try {
            val result = ps.executeBatch();
            val s = new StringBuffer();
            for (val item : result) {
                s.append(item).append(",");
            }
            log.info("mysql sink invoke. result: {}", s);
        } catch (Exception e) {
            // NOTE(review): batch failures are logged and swallowed, and the
            // queue is cleared below regardless — failed records are silently
            // dropped. Confirm whether at-least-once delivery is required here.
            log.error("mysql sink invoke err.", e);
        }
        queue.clear();
    }
}
/*	A class used to manage a group of attributes.
	It is only used internally, both by the ELEMENT and the DECLARATION.

	The set can be changed transparent to the Element and Declaration
	classes that use it, but NOT transparent to the Attribute
	which has to implement a next() and previous() method. Which makes
	it a bit problematic and prevents the use of STL.

	This version is implemented with circular lists because:
		- I like circular lists
		- it demonstrates some independence from the (typical) doubly linked list.

	NOTE(review): fixed two compile errors in the original text — a garbled
	"(as Attribute)" fragment before the sentinel member (restored as a
	comment) and a missing ';' after the closing class brace.
*/
class TiXmlAttributeSet
{
public:
    TiXmlAttributeSet();
    ~TiXmlAttributeSet();

    void Add(Attribute* attribute);
    void Remove(Attribute* attribute);

    // The list is circular through `sentinel`; an empty set is detected by
    // the sentinel's links pointing back at itself.
    const Attribute* First() const {
        return (sentinel.next == &sentinel) ? nullptr : sentinel.next;
    }
    Attribute* First() {
        return (sentinel.next == &sentinel) ? nullptr : sentinel.next;
    }
    const Attribute* Last() const {
        return (sentinel.prev == &sentinel) ? nullptr : sentinel.prev;
    }
    Attribute* Last() {
        return (sentinel.prev == &sentinel) ? nullptr : sentinel.prev;
    }

    // Name lookup: the const overloads (defined elsewhere) do the search;
    // the non-const overloads forward to them via const_cast.
    const Attribute* Find(const char* _name) const;
    Attribute* Find(const char* _name) {
        return const_cast< Attribute* >((const_cast< const TiXmlAttributeSet* >(this))->Find(_name));
    }
    const Attribute* Find(const std::string& _name) const;
    Attribute* Find(const std::string& _name) {
        return const_cast< Attribute* >((const_cast< const TiXmlAttributeSet* >(this))->Find(_name));
    }

private:
    // Copying is forbidden: a copy's sentinel links would alias the source
    // list. Declared `= delete` (the file already uses C++11 `nullptr`) in
    // place of the old private-undefined idiom.
    TiXmlAttributeSet(const TiXmlAttributeSet&) = delete;
    void operator=(const TiXmlAttributeSet&) = delete;

    // Sentinel node of the circular list (stored as a plain Attribute).
    Attribute sentinel;
};
package utils.grinds; public class AmandaPointExtractorTest{/* extends UnitTest { AmandaPointExtractor amandaPointExtractor; AmandaForTest amandaForTest; String mapType = "OA"; DistanceCalculator distanceCalculator; @Before public void setup() { distanceCalculator = new DistanceCalculatorImpl(); distanceCalculator.setBoxCenter(new Vector3D(0, 0, 0)); distanceCalculator.setBoxSize(new Vector3D(2, 2, 2)); amandaPointExtractor = new AmandaPointExtractor(0, 0, distanceCalculator); amandaForTest = new AmandaForTest(1, 1, distanceCalculator); amandaPointExtractor.setMapType(mapType); amandaPointExtractor.setFirstCutoff(0, 0, 0, 0); } // @Test public void testForHDMap() { Logger.info("Big data amanda test"); amandaPointExtractor.setFirstCutoff(-0.55, -0.5, -0.15, -0.2); amandaPointExtractor.setMapType("HD"); String fileNamePath = "test-files/qsar/grinds/amanda/diazepam.HD.map"; amandaPointExtractor.extractAndRetrievePoints(fileNamePath); Logger.info("Big data amanda test finished"); } // @Test public void testForOAMap() { Logger.info("Big data amanda test"); amandaPointExtractor.setFirstCutoff(-0.55, -0.5, -0.15, -0.2); amandaPointExtractor.setMapType("OA"); String fileNamePath = "test-files/qsar/grinds/amanda/ampicillin.OA.map"; amandaPointExtractor.extractAndRetrievePoints(fileNamePath); Logger.info("Big data amanda test finished"); } // @Test public void testForCMap() { Logger.info("Big data amanda test"); amandaPointExtractor.setFirstCutoff(-0.55, -0.5, -0.15, -0.2); amandaPointExtractor.setMapType("C"); String fileNamePath = "test-files/qsar/grinds/amanda/progesterone.C.map"; amandaPointExtractor.extractAndRetrievePoints(fileNamePath); Logger.info("Big data amanda test finished"); } @Test public void extractPointsAppliesFirstCutoffCorrectly() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA.map"; amandaPointExtractor.extractPointsFromFile(fileNamePath); assertEquals(15, amandaPointExtractor.pointsPerAtom.totalSize()); } @Test public void 
extractPointsPerAtomPutsThemInTheCorrespondingAtom() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA.map"; amandaPointExtractor.extractPointsFromFile(fileNamePath); assertEquals(15, amandaPointExtractor.pointsPerAtom.totalSize()); assertEquals(8, amandaPointExtractor.pointsPerAtom.getCollection(1).size()); assertEquals(6, amandaPointExtractor.pointsPerAtom.getCollection(2).size()); assertEquals(1, amandaPointExtractor.pointsPerAtom.getCollection(3).size()); assertNull(amandaPointExtractor.pointsPerAtom.getCollection(4)); } @Test public void extractPointsPerAtomPutsThemInTheCorrespondingAtomAvoidingNonAtoms() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA_withNonAtoms.map"; amandaPointExtractor.extractPointsFromFile(fileNamePath); assertEquals(15, amandaPointExtractor.pointsPerAtom.totalSize()); assertEquals(8, amandaPointExtractor.pointsPerAtom.getCollection(1).size()); assertEquals(7, amandaPointExtractor.pointsPerAtom.getCollection(2).size()); assertNull(amandaPointExtractor.pointsPerAtom.getCollection(3)); assertNull(amandaPointExtractor.pointsPerAtom.getCollection(4)); } @Test public void nPerAtomAreProperlyCalculated() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA.map"; amandaPointExtractor.extractPointsFromFile(fileNamePath); amandaPointExtractor.calculateNValues(); Logger.info("Size " + amandaPointExtractor.pointsPerAtom.totalSize()); assertEquals("", 3.0, amandaPointExtractor.nPerAtom.get(1), 0.0001); assertEquals("", 3.0, amandaPointExtractor.nPerAtom.get(2), 0.0001); assertEquals("", 1.0, amandaPointExtractor.nPerAtom.get(3), 0.0001); } @Test public void twoNHighestEnergyAreSelectedCorrectly() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA.map"; amandaPointExtractor.extractPointsFromFile(fileNamePath); amandaPointExtractor.calculateNValues(); MultiValueMap map = amandaPointExtractor.select2NNodes(amandaPointExtractor.pointsPerAtom); List<GrindPoint> 
atomList1 = new ArrayList<GrindPoint>(); atomList1.add(new GrindPoint(21, -0.3, mapType, 0.3, 0)); atomList1.add(new GrindPoint(18, -0.21, mapType, 0.21, 0)); atomList1.add(new GrindPoint(20, -0.2, mapType, 0.2, 0)); atomList1.add(new GrindPoint(17, -0.11, mapType, 0.11, 0)); atomList1.add(new GrindPoint(2, -0.01, mapType, 0.01, 0)); atomList1.add(new GrindPoint(15, -0.01, mapType, 0.01, 0)); atomList1.add(new GrindPoint(16, -0.01, mapType, 0.01, 0)); atomList1.add(new GrindPoint(19, -0.01, mapType, 0.01, 0)); List<GrindPoint> atomList2 = new ArrayList<GrindPoint>(); atomList2.add(new GrindPoint(10, -0.4, mapType, 0.4, 0)); atomList2.add(new GrindPoint(23, -0.1, mapType, 0.1, 0)); atomList2.add(new GrindPoint(26, -0.1, mapType, 0.1, 0)); atomList2.add(new GrindPoint(22, -0.01, mapType, 0.01, 0)); atomList2.add(new GrindPoint(11, -0.001, mapType, 0.001, 0)); atomList2.add(new GrindPoint(12, -0.001, mapType, 0.001, 0)); List<GrindPoint> atomList3 = new ArrayList<GrindPoint>(); atomList3.add(new GrindPoint(25, -0.01, mapType, 0.01, 0)); List<GrindPoint> calculatedList = (List<GrindPoint>) map.getCollection(1); for (int i = 0; i < calculatedList.size(); i++) { assertEquals(0, new GrindPointSortByEnergyComparator().compare(atomList1.get(i), calculatedList.get(i))); assertEquals(0, atomList1.get(i).position - calculatedList.get(i).position); Logger.info("i atom 1: " + i); } calculatedList.clear(); calculatedList = (List<GrindPoint>) map.getCollection(2); for (int i = 0; i < calculatedList.size(); i++) { assertEquals(0, new GrindPointSortByEnergyComparator().compare(atomList2.get(i), calculatedList.get(i))); assertEquals(0, atomList2.get(i).position - calculatedList.get(i).position); Logger.info("i atom 2: " + i); } calculatedList.clear(); calculatedList = (List<GrindPoint>) map.getCollection(3); for (int i = 0; i < calculatedList.size(); i++) { assertEquals(0, new GrindPointSortByEnergyComparator().compare(atomList3.get(i), calculatedList.get(i))); assertEquals(0, 
atomList3.get(i).position - calculatedList.get(i).position); Logger.info("i atom 3: " + i); } } @Test public void finalNodesAreSelectedCorrectly() { String fileNamePath = "test-files/qsar/grinds/amanda/test_20_nodes.OA.map"; amandaPointExtractor.extractAndRetrievePoints(fileNamePath); List<GrindPoint> calculatedList = (List<GrindPoint>) amandaPointExtractor.finalPointsPerAtom.getCollection(1); assertEquals(3, calculatedList.size()); assertEquals(21, calculatedList.get(0).position); assertEquals(20, calculatedList.get(1).position); assertEquals(17, calculatedList.get(2).position); calculatedList = (List<GrindPoint>) amandaPointExtractor.finalPointsPerAtom.getCollection(2); assertEquals(3, calculatedList.size()); assertEquals(10, calculatedList.get(0).position); assertEquals(26, calculatedList.get(1).position); assertEquals(12, calculatedList.get(2).position); } @Test public void normalizeEnergiesIsCorrect() { List<GrindPoint> atomList1 = new ArrayList<GrindPoint>(); atomList1.add(new GrindPoint(0, -0.3, mapType, 0.1, 0)); atomList1.add(new GrindPoint(1, -0.21, mapType, 0.2, 0)); atomList1.add(new GrindPoint(2, -0.2, mapType, 0.3, 0)); atomList1.add(new GrindPoint(3, -0.11, mapType, 0.4, 0)); List<GrindPoint> atomList2 = amandaPointExtractor.normalizeAmandaScoreAndEnergy(atomList1); assertEquals(-1.1628, atomList2.get(0).amandaScore, 0.001); assertEquals(-0.3876, atomList2.get(1).amandaScore, 0.001); assertEquals(0.3876, atomList2.get(2).amandaScore, 0.001); assertEquals(1.1628, atomList2.get(3).amandaScore, 0.001); }*/ }
Attenuation of Severe Generalized Junctional Epidermolysis Bullosa by Systemic Treatment with Gentamicin Severe generalized junctional epidermolysis bullosa (JEB), a lethal genodermatosis, is mainly caused by premature termination codons (PTCs) in one of the three genes encoding the anchoring protein laminin-332. Only symptomatic treatment has been established; overcoming PTCs by aminoglycosides may represent an interesting alternative. This retrospective study aimed at assessing for the first time the clinical effects of systemic gentamicin application in infants with severe generalized JEB. Five patients, homozygous or compound-heterozygous for PTCs in the gene LAMB3, were treated with gentamicin, which was administered intravenously or by intramuscular injection at doses of 7.5 mg/kg/d for three weeks. Skin biopsies were investigated by immunofluorescence analyses. Clinical effects of the medication were recorded with a parent questionnaire and by assessing weight-for-age charts. Gentamicin application was well tolerated, and long hospitalization was not required. Low levels of laminin-332 could be detected in a skin sample obtained after treatment. Gentamicin had a positive impact on skin fragility and daily life in four patients but did not influence weight gain and failed to reverse the lethal course of the disease. Gentamicin injections should be considered regularly in cases of severe generalized JEB caused by PTCs as they may attenuate JEB symptoms without impeding quality of life.
import pytest

from mocona.scopedvar import V


@pytest.mark.parametrize(
    "var, string",
    [
        [V, "/"],
        [V.a, "/a"],
        [V("a"), "/a"],
        [V.a[...], "/a/"],
        [V.a[...].b, "/a/b"],
        [V.a[...].b - 0, "/a/b"],
    ],
)
def test_V_str(var, string):
    """str() of a scoped-var path object renders its slash-separated path.

    Covers the root (``/``), attribute and call access, ellipsis segments,
    and that ``- 0`` leaves the rendered path unchanged.
    """
    assert str(var) == string
Alkaline Phosphatase Tagged Antibodies on Gold Nanoparticles/TiO2 Nanotubes Electrode: A Plasmonic Strategy for Label-Free and Amplified Photoelectrochemical Immunoassay. This work reports a plasmonic strategy capable of label-free yet amplified photoelectrochemical (PEC) immunoassay for the sensitive and specific detection of model protein p53, an important transcription factor that regulates the cell cycle and functions as a tumor suppressor. Specifically, on the basis of Au nanoparticles (NPs) deposited on hierarchically ordered TiO2 nanotubes (NTs), a protein G molecular membrane was used for immobilization of alkaline phosphatase (ALP) conjugated anti-p53 (ALP-a-p53). Due to the immunological recognition between the receptor and target, the plasmonic charge separation from Au NPs to the conduction band of TiO2 NTs could be greatly influenced, an effect that originated from multiple factors. The degree of signal suppression is directly associated with the target concentration, so by monitoring the changes of the plasmonic photocurrent response after the specific binding, a new plasmonic PEC immunoassay could be tailored for label-free and amplified detection. The operating principle of this study could be extended as a general protocol for numerous other targets of interest.
/**
 * Registers every action held in Skyframe into {@code actionGraph} while collecting the
 * corresponding artifacts into {@code artifacts}, processing the {@link ActionLookupValue}
 * shards on a fixed-size worker pool.
 *
 * <p>NOTE(review): per the original author, parallelizing saved roughly 1.5 seconds on a
 * mid-sized build compared to a single-threaded pass -- confirm the measurement still holds.
 *
 * @param actionGraph receives every registered action; conflicts are recorded in
 *     {@code badActionMap}
 * @param artifacts receives the artifacts of the registered actions
 * @param actionShards pre-partitioned {@link ActionLookupValue}s, one task per shard
 * @param badActionMap accumulates actions that conflicted during registration
 * @throws InterruptedException if the worker pool is interrupted before completion
 */
private static void constructActionGraphAndArtifactList(
    MutableActionGraph actionGraph,
    List<Artifact> artifacts,
    Sharder<ActionLookupValue> actionShards,
    ConcurrentMap<ActionAnalysisMetadata, ConflictException> badActionMap)
    throws InterruptedException {
  ExecutorService pool =
      Executors.newFixedThreadPool(
          NUM_JOBS,
          new ThreadFactoryBuilder().setNameFormat("ActionLookupValue Processor %d").build());
  for (List<ActionLookupValue> shard : actionShards) {
    pool.execute(() -> actionRegistration(shard, actionGraph, artifacts, badActionMap));
  }
  // interruptibleShutdown waits for all tasks and reports whether we were interrupted.
  boolean interrupted = ExecutorUtil.interruptibleShutdown(pool);
  if (interrupted) {
    throw new InterruptedException();
  }
}
from tests.reporter.base import BaseCase

from resource_locker.reporter import Timer


class Test(BaseCase):
    """Smoke tests for the :class:`Timer` reporter helper."""

    def test_context(self):
        """Using Timer as a context manager records a non-negative duration."""
        with Timer() as timer:
            pass
        self.assertGreaterEqual(timer.duration, 0)

    def test_stop_start(self):
        """start()/stop() are safely repeatable; a later stop never exceeds the first."""
        timer = Timer()
        timer.start().start()
        first_duration = timer.stop()
        timer.stop()
        self.assertGreaterEqual(timer.duration, 0)
        self.assertGreaterEqual(first_duration, timer.duration)
def update(self, gatorDefn, index):
    """Append one column of Astrogator result values to ``self.results``.

    Each row of ``self.events`` is ``[segment_name, result_name, unit_or_format]``.
    For every row the named result is read from ``gatorDefn``:

    * DATE-class results are rendered via ``Format``; only formats that render
      as a plain number are accepted, otherwise ``'JDate'`` is substituted
      (with a printed warning).
    * All other results are converted with ``Getin`` into the requested unit.

    The collected values form a single column that is concatenated onto
    ``self.results`` (created on the first call).

    NOTE(review): ``index`` is unused in this body; it appears to be kept for
    caller-interface compatibility -- confirm before removing.
    """
    column = np.zeros([1, len(self.events)])
    for idx, eventRow in enumerate(self.events):
        segment = gatorDefn.GetSegmentByName(eventRow[0])
        if segment.GetResultValue(eventRow[1]).Class.value != 'DATE':
            # Plain numeric result: convert into the unit named by eventRow[2].
            column[0, idx] = segment.GetResultValue(eventRow[1]).Getin(eventRow[2]).value
        else:
            # Epoch formats that render as a value castable to float.
            float_formats = ('EpDay', 'EpMin', 'EpSec', 'EpYr', 'JDate', 'JED',
                             'ModJDate', 'EarthEpTU', 'GPSZ', 'JDateOff', 'SunEpTU')
            if eventRow[2] in float_formats:
                column[0, idx] = float(
                    segment.GetResultValue(eventRow[1]).Format(eventRow[2]).value)
            else:
                print(
                    f'Format "{eventRow[2]}" not supported (must be representable as a float).'
                    f' Substituting with "JDate".')
                column[0, idx] = float(
                    segment.GetResultValue(eventRow[1]).Format('JDate').value)
    print('1st Item: {:25} Shape: {}'.format(column[0, 0], column.T.shape))
    if self.results is None:
        self.results = column.T
    else:
        self.results = np.concatenate(
            (self.results, column.T), axis=1)
A bovine respiratory syncytial virus model with high clinical expression in calves with specific passive immunity Background Bovine respiratory syncytial virus (BRSV) is a major cause of respiratory disease in cattle worldwide. Calves are particularly affected, even with low to moderate levels of BRSV-specific maternally derived antibodies (MDA). Available BRSV vaccines have suboptimal efficacy in calves with MDA, and published infection models in this target group are lacking in clinical expression. Here, we refine and characterize such a model. Results In a first experiment, 2 groups of 3 calves with low levels of MDA were experimentally inoculated by inhalation of aerosolized BRSV, either: the Snook strain, passaged in gnotobiotic calves (BRSV-Snk), or isolate no. 9402022 Denmark, passaged in cell culture (BRSV-Dk). All calves developed clinical signs of respiratory disease and shed high titers of virus, but BRSV-Snk induced more severe disease, which was then reproduced in a second experiment in 5 calves with moderate levels of MDA. These 5 calves shed high titers of virus and developed severe clinical signs of disease and extensive macroscopic lung lesions (mean+/−SD, 48.3+/−12.0% of lung), with a pulmonary influx of inflammatory cells, characterized by interferon gamma secretion and a marked effect on lung function. Conclusions We present a BRSV-infection model, with consistently high clinical expression in young calves with low to moderate levels of BRSV-specific MDA, that may prove useful in studies into disease pathogenesis, or evaluations of vaccines and antivirals. Additionally, refined tools to assess the outcome of BRSV infection are described, including passive measurement of lung function and a refined system to score clinical signs of disease. Using this cognate host calf model might also provide answers to elusive questions about human RSV (HRSV), a major cause of morbidity in children worldwide. induced by vaccination. 
The use of commercial vaccines in these animals has not always been fully satisfactory, and the development of a safe and effective BRSV vaccine, with a long duration of protection, therefore remains a high priority for the cattle industry. Furthermore, following vaccination, exacerbated reaction to natural or experimental infection, although uncommon, has been described in calves, and resembles that previously observed in children immunized with an inactivated vaccine against the genetically and antigenically closely related pneumovirus, human RSV (HRSV). For these reasons, as well as to improve understanding of the pathogenic mechanisms during an acute infection, a clinically expressive BRSV model is needed to study BRSV pathogenesis, and to evaluate the protective efficacy of vaccine candidates and antivirals. Several studies have attempted to reproduce field-like BRSV disease in young calves with varying levels of MDA, by administrating BRSV intranasally, intratracheally, or by a combination of intranasal and intratracheal route. Some studies report severe clinical disease following experimental BRSV infection, but omit observed or methodological details that would allow interstudy comparison (e.g. rectal temperature ). Whereas most studies have failed to reproduce severe clinical signs of disease, despite using high titers of virus and repeated inoculations, studies utilizing inoculation by inhalation of aerosol have been those most successful [7,14,, although this is not consistent. Here, our objective was to improve and characterize a BRSV model in calves, by selecting one of two inocula, based on two different strains passaged in calves or in cell culture, and used by two different research groups, to obtain a model that would induce clinical signs comparable to those observed in the field. 
In addition, we describe a refined scoring system for clinical signs of disease, and objective tools that can be used to monitor and assess the effects of BRSV infection in calves. Cells and viruses The BRSV Snook strain was isolated in calf kidney cells, and then passaged three consecutive times in gnotobiotic calves by inoculation by respiratory route, and prepared from bronchoalveolar lavage (BAL), as previously described (BRSV-Snk inoculum). BRSV isolate no. 9402022 Denmark was isolated in fetal lung cells, passaged in bovine turbinate cells, and prepared as described previously (passage 8, BRSV-Dk inoculum). Aliquots of the BRSV-Snk and BRSV-Dk inocula were titrated by plaque assay using calf kidney cells, as previously described. Through inoculation of appropriate cell cultures and mycoplasmal or bacterial media, all cells and virus preparations were determined to be free from bovine viral diarrhea virus and bacteria, including mycoplasma (data not shown). Animals The calves included were male, of Swedish Holstein or Swedish red and white breed, and originated from two conventional dairy herds, both free from bovine viral diarrhea virus. The herds were monitored for natural BRSV infections through monthly analysis of BRSVspecific IgG 1 (see section Detection of BRSV-specific antibodies) in bulk tank milk and in sera from calves, heifers and cows. Herd 1 was monitored from 17 days after the birth of the oldest calf, 1 day after the birth of the second oldest, and before the birth of the remaining calves in study 1. Herd 2, was monitored from 2 months before birth of the oldest calf to be challenged with BRSV in study 2. For study 1, six calves (A1-3 and B1-3) were obtained from herd 1. These calves had low levels of BRSV-specific serum MDA on the day of challenge; mean 4.1 ± 4.8% COD of kit positive at a dilution of 1:25, where ≤10%COD positive is considered negative by the ELISA kit. 
In study 2, five calves (C1-5) were obtained from herd 2, all with moderate levels of BRSV-specific serum MDA; mean 49 ± 30% COD positive, or log 10 titer 2.0 ± 0.2, defined as moderate. In addition, three calves (D1-3) were obtained from herd 1 to act as uninfected controls. Challenge and experimental design Groups of calves were housed in an animal facility, in separate rooms, with free access to clean water and roughage, and additional daily rations of concentrate. Each room had separate negative-pressure ventilation, physical bio-barriers and protective clothing for all staff. All calves were healthy on arrival, and no respiratory clinical signs were observed during one week of acclimation and quarantine. To minimize interference by bacterial co-infections, all calves were treated with antibiotics for five consecutive days (20 mg/kg/day procaine benzyl penicillin intramuscularly). On post-infection day (PID) 0, all calves were challenged by aerosol inhalation. Inhalation was facilitated by a face mask designed for drug-inhalation in foals (Swevet Piab AB, Sweden). Following challenge, calves were clinically monitored and samples collected until PID 7, when they were euthanized. In study 2, five calves (C1-5) were challenged with BRSV-Snk and monitored for seven days before euthanasia on PID 7, using the facilities and protocol described for study 1 (except where otherwise noted). In addition to these five calves (6 ± 3 weeks old), post-mortem (PM) BAL samples were collected and analyzed from three healthy calves (calves D1-3; 13 ± 4 weeks old), to act as controls for BAL samples from BRSV infected animals in study 2. Euthanization was performed by an overdose of general anesthesia (5 mg/kg ketamine and 15 mg/kg pentobarbital sodium) followed by exsanguination. Approval for both experiments were retained from the Ethical Committee of the district court of Uppsala, Sweden (Ref. no. C330/11). 
The ethical endpoint of both experiments, defined as the condition when animals would be euthanized prematurely, included: i) marked abdominal dyspnea or respiratory rate >100/min, in conjunction with severely depressed general state, or ii) anorexia for >24 h, or iii) rectal temperature >41°C for >36 h. Clinical and pathological examination Following challenge, daily clinical examinations were performed on each calf, and numerical values were determined for a set of predetermined parameters reflecting general state and respiratory disease (Table 1). Daily individual clinical scores were calculated by summing these numerical values multiplied by a coefficient for each parameter (Table 1). Coefficient weights reflect parameter association with disease severity in BRSV-infected calves less than 3 months of age, based on observations during natural BRSV-outbreaks. Thus, general depression and reduced or absent appetite in BRSV-infected calves were considered moderate to severe signs of BRSV disease with high clinical impact and poor prognosis (coefficients of 4), abdominal dyspnea a moderate sign (coefficient of 3), and increased rectal temperature and respiratory rate, mild to moderate signs (coefficients of 2). The other recorded parameters have varying clinical specificity and severity, from mild to severe, but typically have little clinical impact, and may be very transient. These parameters were assigned a coefficient of 1 (Table 1). Individual accumulated clinical scores (ACS) were calculated as the area under daily clinical scores, using the Trapezoid method. At PM examination, lung lesions were evaluated, recorded and quantified, as previously described. Tissue samples, preferentially from lesioned areas, were collected from each of the lobes in the right lung and trachea, and preserved in 5% paraformaldehyde. Sampling Serum was obtained from blood collected on PID −37, −15, 0 and 7, and stored at −20°C, until antibody analysis. 
Nasal secretions were collected and stored at −70°C, as previously described using sterile cotton-tipped swabs daily from PID 0 to 7, and tampons on PID 0 and 6. In study 1 endoscopic BAL in sedated calves was performed the day before challenge in all calves as previously described, including disinfection of the endoscope between each calf, except lungs were flushed with PBS with 120 µg/ml benzyl penicillin sodium. In both study 1 and study 2 PM BAL was performed in all calves as previously described, except lungs were flushed using PBS. BAL fluid was stored on ice after recovery. BAL cells in 10 ml BAL fluid were pelleted by centrifugation (200 × g, 10 min), and resuspended in either 350 µl RLT buffer (Qiagen, Sweden) or 1 ml DMEM with 20% fetal calf serum, and stored at −70°C. BAL supernatant was recovered from centrifugation and stored at −70°C. Bacterial culture was attempted by inoculating bovine blood agar plates with 1 ml of unprocessed BAL fluid. Detection of BRSV-specific antibodies BRSV-specific IgG 1 antibodies were analyzed using a commercial ELISA kit (SVANOVIR® BRSV-Ab ELISA, Svanova, Sweden), in accordance with the manufacturer's instructions, including calculations of corrected optic density (COD) and percent of kit positive control (%COD positive). Detection and isolation of virus BRSV-F gene RNA present in nasal secretions or in BAL cells corresponding to 10 ml of BAL, was quantified by RT-qPCR as previously described, and expressed as TCID 50 equivalent units to dilutions of a virus sample with known titer. Accumulated virus shed (AVS) was calculated as the area under individual curves of BRSV detected by RT-qPCR in nasal secretions from PID 0 to PID 7. Virus isolation was attempted by inoculating bovine turbinate cells with BAL and nasal secretion samples, as previously described. Cultures of inoculated bovine turbinate cells were examined daily, and were considered positive if cytopathic effects appeared within seven days.
Histological analysis Lung and trachea tissue samples were fixed in 10% buffered formalin, embedded in paraffin, sectioned and stained with hematoxylin and eosin (HE) and by immunohistochemistry (IHC) to detect BRSV antigen. BRSV immunohistochemistry staining For unmasking, sections were treated with heat-induced epitope retrieval (HIER). They were placed in HIER buffer (Target Retrieval Solution, pH = 6, DAKO, Sweden) and subjected to heat treatment in HIER Microwave at 750 W for 7 minutes followed by 350 W for 14 minutes and were allowed to stand for 20 min at room temperature. Endogenous peroxidase activity was blocked with 3% hydrogen peroxide for 20 min at room temperature. Unspecific antigen staining was blocked with 2% bovine serum albumin (Sigma-Aldrich, Sweden AB) for 20 min. The slides were then incubated at room temperature for 45 min with mouse monoclonal antibody anti RSV (clone 5H5, 2G122, 5A6 and 1C3, NCL-RSV3, Novocastra, Leica Microsystems, Sweden) diluted 1:100 in diluents buffer (1% BSA/TBS pH = 7.6). The detection was conducted with the dextran polymer method (EnVisionTM/mouse, DAKO, Sweden). The color was developed with diaminobenzidine substrate (DAB, DAKO, Sweden). Sections were counterstained with haematoxylin. Antibody-omission stained sections served as negative controls for each section. Appropriate positive and negative control sections were included in each run. Scoring of histopathological severity of inflammation The severity of histopathology was scored in each HEstained section, from 0 (normal), 1 (mild), 2 (moderate) to 3 (severe). The extent and localization of BRSV-antigen was evaluated in IHC-stained sections. BAL cell type composition was determined by manual microscopic analysis of stained cytospin preparations of BAL fluid. 
Measuring lung function Lung function was passively measured before and after BRSV challenge, on PID 0 and 6, by the forced oscillation technique (EquineOsc Calf measurement head, EEMS, Harts, UK), using the same face mask described for aerosol inhalation. Values for resistance (R) and reactance (X)(kPa/L/s) were obtained at 3, 5, 7 and 10 Hz, as described by Reinhold and colleagues. Each calf was tested at least twice on each day and the data sets with optimal coherence selected (coherence > 0.9; majority of data sets > 0.97). In the event of clear artifacts of breathing, such as cough or breath holding, the series were repeated. Daily calibration was performed using a 2.26 m long tube, with a 21 mm internal diameter. Ranking of infected calves To encompass the three major aspects of BRSVinfection clinical signs, lung pathology and virus replication, the six calves in study 1 (calves A1-3 and B1-3) were ranked from least affected to most affected based on: accumulated clinical scores recorded from PID 0 to PID 7; degree of consolidative lesions in lungs on PID 7; and accumulated virus detected in nasal secretions from PID 0 to PID 7. Group rank sums were then calculated for each rank, and for all three ranks (total rank sum). Statistical analysis Where not otherwise stated, results are presented as group mean ± standard deviation (SD). For results presented as a percentage of a whole, SD is presented in percentage points (pp). Statistically significant differences were determined using either one-way ANOVA followed by Student's t-test, or pairwise t-test, or Kruskal-Wallis analysis followed by Wilcoxon test (JMP 10 for Mac, SAS Institute Inc.). Significance was assumed when p ≤ 0.05 and tendency when p ≤ 0.1. 
Study 1: Evaluation of clinical, pathological and virological expression of two virulent BRSV inocula in calves with low levels of MDA Clinical signs following challenge Following experimental infection, mild to severe clinical signs of respiratory disease were observed in all infected calves ( Figure 1A). For all calves, upper respiratory signs, such as nasal discharge and coughing, as well as ocular discharge were observed on PID 3-5. In BRSV-Snk infected calves, these progressed to severe respiratory signs on PID7, whereas clinical signs were more moderate on PID 7 in calves infected with BRSV-Dk (Table 2). Consequently, compared to BRSV-Dk infected calves, BRSV-Snk infected calves had significantly higher (p ≤ 0.05) accumulated clinical scores ( Figure 1B). Macroscopic and histological lung pathology BRSV-Snk infected calves tended to have more extensive consolidated lung lesions (38.5 ± 26.3% of total lung tissue) on PID7, compared to calves infected with BRSV-Dk (12.8 ± 14.6%), but this difference was not statistically significant (p = 0.23; Figure 2A and C). Histologically, lesions in the trachea in both groups of calves consisted of degeneration and necrosis of epithelium, and epithelial hyperplasia in some areas ( Figure 2B:I and II show representative pictures from BRSV-Snk and BRSV-Dk infected animals, respectively). In the lungs, BRSV-Snk infected calves showed extensive moderate to severe bronchointerstitial pneumonia, as well as purulent bronchitis and bronchiolitis ( Figure 2B:III; representative picture of lung, calf A2). BRSV-Dk infected calves showed similar but less severe histopathological changes in the lungs, ranging from mild to moderate ( Figure 2B:IV; representative picture of lung, calf B3). 
In summary, BRSV-Snk infected animals tended to have macroscopically more extensive, and histologically more severe lung lesions, compared to BRSV-Dk infected animals ( Figure 2C), but with no discernible difference in the severity of histological inflammation in the trachea. RT-qPCR detection and isolation of BRSV in nasal secretion and BAL BRSV RNA was detected by RT-qPCR in nasal secretions collected daily from PID 0 to PID 7, and in BAL collected on PID 7 (PM BAL). In addition, BRSV was isolated in the first passage in bovine turbinate cell culture, from all infected calves, in both nasal secretions from PID 6, and PM BAL fluid. Attempted bacterial culture from BAL fluid indicated no bacterial coinfection in the lungs of any of the calves. Two of the BRSV-Snk infected calves (A2 and A3) started shedding virus on PID 2, and shed high amounts of virus (A2 log 10 AVS 19.3 TCID 50 equiv.; A3 log 10 AVS 14.9 TCID 50 equiv.), both in nasal secretions and PM BAL, whereas the third BRSV-Snk infected calf (A1), shed substantially less virus (log 10 AVS 3.6 TCID 50 equiv.) ( Figure 3A-B). Compared to the two high-shedding BRSV-Snk infected calves, calves infected with BRSV-Dk shed markedly less virus in nasal secretions (B1, B2 and B3 log 10 AVS 3.5, 9.4 and 11.1 TCID 50 equiv., respectively), and had less viral RNA in BAL, although this was not statistically significant ( Figure 3A-B). BRSV immunostaining in lung and trachea sections In IHC-stained sections of trachea very little or no BRSV antigen was detected in BRSV-Snk infected calves ( Figure 3C:I is representative), whereas viral antigen was abundant in sections of trachea from BRSV-Dk infected calves ( Figure 3C:II is representative). Conversely, whereas viral antigen was abundant in the lungs of 2/3 BRSV-Snk infected calves ( Figure 3C:III is representative), very little or no BRSV antigen was detected in the lungs from BRSV-Dk infected calves ( Figure 3C:IV is representative). 
Six calves were experimentally infected with virulent BRSV, either passaged in vivo (BRSV-Snk, n = 3, calves A1-3), or in vitro (BRSV-Dk, n = 3, calves B1-3). Clinical signs were recorded daily for seven days, and scores calculated as described in Table 1. The third BRSV-Snk infected calf (A1) was negative for BRSV antigen by IHC, both in the trachea and in the lungs (data not shown). Serum BRSV-specific antibodies All calves, except A1, had low and consistently decreasing levels of BRSV-specific MDA, throughout the experiment ( Figure 4). In contrast, the BRSV-Snk infected calf A1 (the oldest calf in study 1) seroconverted within 7 days after challenge, strongly suggesting that this calf had been previously primed against BRSV. Animal ranking When calves were ranked from least affected to most affected based on clinical score, degree of lung pathology and accumulated virus shed in nasal secretions, two of the BRSV-Snk infected calves (A2 and A3) consistently received the highest ranks ( Figure 5A). Conversely, the calves infected with BRSV-Dk received low ranks, as they demonstrated less severe clinical signs, less lung pathology, and less virus shedding ( Figure 5A). The BRSV-Snk infected calf that rapidly seroconverted following challenge (A1), received a high clinical rank, an intermediate lung pathology rank, and a low viralshed rank ( Figure 5A). Overall, the BRSV-Snk infected calves ranked significantly higher, compared to calves infected with BRSV-Dk (p ≤ 0.01; Figure 5B). Based on the overall ability of the BRSV-Snk inoculum to induce BRSV infection, it was chosen as the inoculum in study 2, to reproduce and characterize the model in calves with moderate levels of MDA. 
Study 2: Reproduction of clinical signs, virology and pathology using aerosolized BRSV-Snk in calves with passive immunity Based on the high level of clinical signs of disease observed in calves with low levels of MDA, following challenge with BRSV-Snk in study 1, an additional five calves (calves C1-5), which were all BRSV-naive and had moderate levels of BRSV-specific serum IgG 1 MDA were challenged using the same inoculum and protocol as used in study 1. Clinical signs, lung pathology and virology following challenge Clinical signs of disease and lung pathology, as well as levels of viral RNA detected in the upper and lower airways in study 2, following challenge of calves C1-5, have been described in detail elsewhere. The amplitude and kinetics of these parameters were in line with observations in BRSV-Snk infected calves in study 1 ( Figure 6A-C). Briefly, all infected calves in study 2 developed clinical signs of upper respiratory disease starting on PID 3-5, which progressed to severe lower respiratory disease from PID 5 to PID 7 ( Figure 6A). On PID 7, all five calves were moderately to severely depressed (recumbent, and staggering when prompted to rise), with reduced or absent appetite. Although both groups of BRSV-Snk infected calves in study 1 and 2 shed high amounts of virus, as detected by RT-qPCR, calves in study 2 shed less accumulated virus in nasal secretions compared to those in study 1 (log 10 1.6 TCID 50 eq. difference in mean), but more in BAL fluid on PID 7 (log 10 2.1 TCID 50 eq. difference in mean). At postmortem, BRSV-Snk infected calves in study 2 had extensive consolidated lung lesions and histopathological changes on PID 7, similar in extent to those in study 1 (38.5 ± 26.3% and 48.3 ± 12.0% of total lung area for study 1 and study 2, respectively; Figure 6C; mean histological score 2.7 ± 0.3 and 2.9 ± 0.1 for study 1 and study 2, respectively; Figure 6C). 
Quantitative assessment of lung function The impact of lower respiratory disease (as demonstrated by clinical signs of disease and lung pathology) on lung function in the five calves in study 2 was evaluated by the forced oscillation technique before and after challenge (on PID 0 and 6). Following challenge, infected animals demonstrated a tendency at 10 Hz measurements for increased airway resistance (0.17 ± 0.03 kPa/L/ s on PID 0; 0.20 ± 0.06 kPa/L/s on PID 6; p = 0.2, pairwise t-test) and significantly decreased airway reactance (0.03 ± 0.03 kPa/L/s on PID 0; −0.02 ± 0.04 kPa/L/s on PID 6; p ≤ 0.05, pairwise t-test; Figure 7). Cytology and cytokine profile in BAL Seven days after experimental BRSV infection, BAL was collected from all five infected calves in study 2, and in addition, from three uninfected calves. BAL cell types in cytospin preparations were analyzed by light microscopy ( Figure 8A) and BAL supernatant was analyzed using ELISAs, specific to bovine inflammatory cytokines ( Figure 8B-F). Cytokine analysis of BAL supernatant from infected calves demonstrated significantly higher levels of IFN (p ≤ 0.005; Figure 8E) and a tendency for higher levels of IL-6 (p = 0.08; Figure 8C), compared to uninfected control calves. In contrast, levels of IL-4, IL-8 and TNF in BAL, did not differ from those of uninfected controls, seven days after infection ( Figure 8B,D and F). Discussion In the present paper, we describe an experimental model of BRSV infection with strong clinical and pathological Figure 5 Clinical, pathological and virological ranking of calves following aerosol challenge with either BRSV-Snk or BRSV-Dk. Calves were experimentally infected as described in Figure 1. Following challenge, calves were ranked (panel A) based on accumulated daily clinical scores (Clinical rank), nasal virus shed (Viral-shed rank), and extent of lung lesions (Pathology rank). Panel B shows the rank sum for each of the three ranks, and the total rank sum per group. 
expression in calves with maternal antibodies. This model combines and refines elements from previously published studies, including aerosol inoculation, the use of inoculum passaged in gnotobiotic calves, and methods to monitor and quantify clinical, pathological and virological parameters. We believe that this model can serve to enable a better evaluation of vaccine and antiviral safety and efficacy and further increase understanding of the pathogenesis of BRSV, and also of HRSV. Regardless of the inoculum, all inoculated calves (n = 11), in both studies, developed manifest BRSV disease. The rapid seroconversion detected in calf A1 in study 1 indicated that, in contrast to the other calves, this BRSV-Snk-infected calf had been previously exposed to natural BRSV. This highlights that, to ascertain BRSV naivety by seromonitoring in herds, seronegative sentinel animals need to be regularly monitored during the entire lifespan of calves to be included in experimental trials. This case also confirms earlier reports that a sufficient amount of MDA can suppress detectable humoral immune responses, following BRSV infection in young calves, with the net effect of declining MDA detected by ELISA. However, although this previous priming appears to have provided some virological protection, compared to all other BRSV-Snk infected calves, calf A1 demonstrated severe clinical disease following BRSV infection, in the absence of any other detected pathogen, contrary to previously published reports. In contrast, the moderate clinical signs, pathology and virus shed observed in calf B1 following BRSV-Dk challenge, may possibly be explained by favorable genetics, with more efficient innate and cellular responses. Any previous BRSV exposure of calf B1, even if virus replication was very limited, would have resulted in a rapid anamnestic humoral immune response upon reinfection, as seen in calf A1, and demonstrated elsewhere. 
Immunohistochemical staining for BRSV antigen in study 1 showed a marked difference in localization of virus on PID 7, where two BRSV-Snk infected calves had large amounts of virus in the lungs and only small amounts of virus in the trachea, while the reverse was true for BRSV-Dk infected calves. This disparity in antigen localization on PID 7 might be due to delayed progression of viral replication in BRSV-Dk infected calves. This opens the possibility that BRSV-Dk infected calves might have developed more severe clinical signs, if the 7-day challenge model had been abandoned and the experiment had been prolonged. However, this would contradict previous observations using the BRSV-Dk inoculum, which indicate a peak of clinical signs on PID 6. Nonetheless, virus was isolated and high quantities of viral RNA were detected by RT-qPCR in samples from the upper and lower airways from all infected calves in both studies, including calves A1 and B1, although BRSV-Snk infected calves from both studies shed 10 3 times more virus in nasal secretions than BRSV-Dk infected calves. Despite the differing data of calves A1 and B1, and although the number of animals in the first study was low (n = 3 + 3), we concluded that BRSV-Snk infected calves tended to be more severely affected in the 7-day experimental infection model, compared to BRSV-Dk infected calves, when summarizing clinical, pathological and virological parameters ( Figure 5B). Thus, results from study 1 were reproduced in study 2, using aerosol inoculation of the BRSV-Snk inoculum in an additional five BRSVnaive calves, with moderate levels of MDA. Using a minimal amount of aerosolized inoculum (10 4.0 pfu for BRSV-Snk) to experimentally infect calves with and without MDA, the kinetics of severe naturally occurring BRSV infection in calves was recreated in this study. 
On PID 7, most calves were severely affected, and all BRSV-Snk infected calves in study 2 were demonstrating depression, anorexia, pyrexia, tachypnea, abdominal dyspnea and wheezing lung sounds. The high level of clinical expression in BRSV-Snk infected calves was mirrored by the great extent of macroscopic lung lesions and by the severity of histopathological changes in the lungs on PID 7. Manifest inflammation was further verified in BRSV-Snk infected calves in study 2, with significantly increased numbers of neutrophils, macrophages and lymphocytes in BAL, similar to that reported following natural BRSV infection in calves. At the peak of clinical signs, 7 days post infection, the calves in study 2 also demonstrated an increase of IFN and minimal amounts of IL-4 in BAL supernatant, which agrees with previously reported responses to primary BRSV infection in calves. Previous studies have shown that T lymphocytes migrating to the lung during BRSV infection are predominantly IFN producing CD8 + T cells, which have been shown to be important for BRSV clearance. However, at least a proportion of the IFN detected in BAL supernatant in study 2, may also have been produced by NK cells, or alveolar macrophages, as have been shown in vitro with human alveolar macrophages. Similar to that seen in calves, infants hospitalized with severe HRSV bronchiolitis, had an increased frequency of IFN producing CD8 + T cells, collected by nasal brush, compared to infants with milder upper respiratory tract infections. Thus, IFN in BAL supernatant can serve as an objective measurement of disease severity, following experimental BRSV challenge. Elevated levels of TNF, IL-6 and IL-8 in BAL or serum have also been associated with clinical signs and pathology caused by BRSV infection in calves. 
The lack of detectable increases in these cytokines in BAL supernatant on PID 7 in study 2 may have been due to suboptimal timing of BAL collection, as another study where 6 weeks old calves where infected with BRSV reported TNF and IL-6 concentrations in BAL to peak on PID 9 and PID 3, respectively. The patent pneumonia in the BRSV-Snk infected calves following infection in study 2, as demonstrated by clinical signs, lung pathology, and the inflammatory picture in BAL, also reduced the lung function of affected animals; with increased airway resistance, and decreased airway reactance, which is suggestive of bronchoconstriction and obstructive airway processes. The objective measurement of lung function by the forced oscillation technique can be a useful tool in further quantifying the outcome of a BRSV challenge, and the efficacy of vaccine candidates, in calves. Optimization of materials (e.g. face mask and tubing) and methods (e.g. frequencies used) could further improve the analysis, and need to be investigated in a larger set of calves. The relative potency of the BRSV-Snk inoculum might be due to loss of virulence in the BRSV-Dk inoculum, following passage in cell culture. This is supported by previously published studies, which demonstrated a higher level of clinical signs of respiratory disease in calves with MDA, using the same mode of inoculation and the same isolate as the BRSV-Dk inoculum, but with fewer passages in vitro, and by propagation in fetal lung cells. Loss of virulence following in vitro passage has been reported in some studies for BRSV and HRSV, but not in others, and likely depends on the type of cells and number of passages, and may be associated with alterations in protein expression and post-translational modifications. The BRSV-Snk inoculum, in contrast to the BRSV-Dk inoculum, had been passaged in gnotobiotic calves. 
Apart from the inoculum, challenge by aerosol inhalation hinges on two principal factors: the quality of the aerosol, and the quantity inhaled by each animal. Limited experimental infection studies in calves, using similar aerosolization of BRSV in conjunction with intratracheal injection, indicate that virus is mainly deposited in the upper airways using this method, with subsequent progression of virus replication to the lower airways. However, other studies using inhaled aerosols show that droplets ≤5 µm in diameter (67% of droplets in the present study) can reach the alveoli in humans, and reach the whole lung when infecting steers with aerosolized foot-and-mouth disease virus, and more accurately reproduce the symptoms of natural infection, compared to large droplet intranasal administration, when human volunteers were infected with influenza. Thus, more research on the kinetics of natural BRSV infection is needed, to complement experimental findings, and to further elucidate the relevance of the model with regard to BRSV pathogenesis. To study the unmodified pathogenesis of BRSV, field-like clinical signs are essential, and to calculate relevant treatment effects in vaccine or antiviral trials, a minimum clinical expression is required, making the model presented herein highly relevant, in contrast to comparable models with less clinical signs, or comparable clinical expression, but less neutralizing MDA at the time of challenge. This cognate host calf model might also provide further understanding about HRSV in infants, with particular usefulness in the study of RSV pathogenesis and pathological processes in the lower airways, where data from infants is limited, but also to evaluate candidate vaccines that utilize proteins conserved across BRSV and HRSV. Conclusions In conclusion, we have established a BRSV model with a severe clinical expression in calves with maternal antibodies at the time of challenge. 
We furthermore describe tools to evaluate disease severity: consistently, using a rigid and comprehensive clinical scoring system; and objectively, using a passive lung function test and IFN-γ concentration in BAL, to complement established parameters, such as extent of lung lesions and virus shedding following challenge. These tools can be used in future BRSV research and vaccine development studies and this model could also be valuable for the understanding of HRSV.
It's safe to say that the expectations for the upcoming third season of AMC's The Walking Dead are fairly high, given the tease at the end of season 2 featuring not only the debut of fan-favorite Michonne (Danai Gurira), but the new season will also take the survivors off Hershel's farm and have them set up camp in a rather ominous-looking prison. In an an on-set interview with AMC, Kirkman offered a description of the prison set and spoke briefly on the subject of Michonne's sword-handling skills. He also spoke about fan expectations with the inclusion of the villainous Governor (David Morrissey) and how he will play into the larger, more dangerous world that is expected to be unveiled during season 3. According to Kirkman, the prison set is one of the most remarkable pieces of construction for a television series, and it largely works because of its faithfulness to the source material. "The big change this season is we've got this amazing prison that we're filming in. It's absolutely stunning, and I never get used to being on set. They've taken a lot of what you see in the comic book series and brought it to life in ways that I didn't think possible. This is going to be one of the most impressive looking things that's ever been put together for a show." So far, The Walking Dead has been about small band of characters surviving together against the threat of the undead walkers, but in season 2, a potentially greater menace was revealed in the form of other survivors, hell bent on making the most out of the inherent lawlessness of a society in ruins. With that added danger, season 3 begins a new chapter where humans step up to the top of the food chain and once more become the primary hazard to the living. "The plan was always to evolve naturally into a place were the zombies essentially become a manageable threat. You know the rules. You know how to deal with them. To a certain extent they become something to not really be scared of unless you mess up. 
Humans, however, do not follow any rules and will always do something that surprises you and are capable of doing things far worse than trying to eat you... We're definitely going to be seeing a lot of horrible things." Of course, for fans of the comic, the prison storyline features horrible things done to and by the two newest members of the cast: Michonne and the Governor. To hear Kirkman say it, the adaptation of the key prison elements to television will set the standard for The Walking Dead series in terms of the scale and spectacle of the storytelling. "When you think about The Walking Dead comic series, you think about oh, the stuff they did with the Governor, the stuff they did with the prison and Woodbury and Michonne. And that's really a lot of the stuff that people remember the story for and that's stuff we haven't even gotten to in the TV show yet. So as much as people love the show, and as high as the ratings are, and as cool as the show is, I feel like we haven't even gotten to the good stuff yet. This season is absolutely going to blow people away." Although she was only teased at the end of the second season, Michonne quickly became one of the most talked about aspects of the finale and since then everyone seems to be fascinated with the sword-wielding mystery woman and her undead traveling companions. Although Kirkman confirms Michonne will be hacking some zombies, the show won't lose its focus on the human drama. "There's been quite a bit of sword training going on and [Danai] is doing an amazing job. She's going to do all of the hard character stuff and drama that The Walking Dead is known for, but she has tremendous physical capability and the sword training that I've seen is absolutely amazing. I can't wait to see her hack up some zombies. She's using a sword that we specially designed for the show...The origins of the sword will be revealed on the show." 
One thing fans can expect is more zombie madness when season 3 rolls around; mainly because the season has increased the episode count of season 2 - bringing the total number of episodes to 16. That's 10 more episodes than the entirety of season 1. "The actors and the crew and a lot of the producers would not be thrilled to hear me saying this, but I love doing 16 episodes a year. I think the more the better, and I think it's a lot of fun. We are going to get to tell bigger stories, and tell more stories, and get into the characters a lot more. So the more the merrier." Look for The Walking Dead season 3 to kick off on AMC in October.
Too Close for Comfort Synopsis Ted Knight and Nancy Dussault star as Henry and Muriel Rush, respectively; owners of a two-family house in San Francisco, California. Henry is a conservative cartoonist who authors a comic strip called Cosmic Cow. During scenes in which Henry draws in his bedroom, Knight used his earlier acquired ventriloquism talents for comical conversations with a hand-puppet version of "Cosmic Cow." Muriel is a laid back freelance photographer, having been a lead singer of a band in her earlier days. They have two grown children, older daughter, brunette Jackie (Deborah Van Valkenburgh) who works for a bank and younger daughter Sara (Lydia Cornell), a blonde bombshell and a college student at San Francisco State University. At the start of the premiere episode, Jackie and Sara are living with their parents in a cramped, awkward arrangement. Their longtime downstairs tenant, Myron (later called Neville) Rafkin, recently died. The family discovers Rafkin was a transvestite and the many strange women Henry had been opening the door for all those years were actually Rafkin himself. Jackie and Sara convince their parents to allow them to move into the now-vacant downstairs apartment. In a running gag, Henry falls off the girls' ultra-modern chairs or couch every time he attempts to sit down. Despite the daughters' push for independence and moving into the downstairs apartment, Henry proves to be a very protective father and constantly meddles in their affairs. Due to an actors' strike led by the Screen Actors Guild and the American Federation of Television and Radio Artists, new programming for the fall 1980 season was pushed back several months. As a result, Too Close for Comfort did not debut until November 11, 1980, and its initial season consisted of 19 episodes. The show garnered high ratings, benefiting from its placement in ABC's powerhouse Tuesday night lineup following hits like Happy Days, Laverne & Shirley and Three's Company. 
Both the latter series and this series were recorded in the same studio, CBS Television City in Los Angeles, during Too Close for Comfort's ABC years. A few episodes into the series, Sara's addle-headed friend Monroe Ficus, played by actor Jim J. Bullock, made an appearance. Although he was originally intended to be used for only a single episode, producers added the character to the series. Monroe was introduced to Henry by Sara as a depressed, lonely fellow student and street musician. Although Sara (with help from Jackie) tried to help him and send him on his way, Monroe found himself getting woven into the entire family's affairs and he became just a "friend" of Sara's and Henry's principal (if unintended) foil. During the first two seasons, Selma Diamond made guest appearances as Mildred Rafkin, sister of the late Myron. Sardonic, deadpan Mildred initially showed up to collect belongings left by Myron/Neville in the downstairs apartment, but continued to visit thereafter. Seemingly, there were sentimental reasons, but occasionally she would attempt to make time with the much younger Monroe, with whom she was infatuated. Also added in early 1981 was Arthur Wainwright (Hamilton Camp), Henry's boss and head of Wainwright Publishing, who nearly decided to force the veteran cartoonist and Cosmic Cow into retirement in order to maintain a youth-oriented staff. The short-statured Mr. Wainwright, who spoke with Shakespearean diction and fancied himself an amateur detective (as a result of the famous mystery novels his company published), eventually let Henry stay with the firm, after the latter proved adept in helping him solve the mystery of Sara's stolen purse. Wainwright no longer appeared in person after the first season, but was referred to. Later, at the start of the fifth season, Graham Jarvis began appearing as Wainwright in a few guest appearances. 
Developments in seasons two and three During its second season, the series' principal stories are focused around Muriel's pregnancy. Additionally, Henry's niece April (Deena Freeman) comes from Delaware to live with the Rush family. The season concludes with Muriel giving birth to a son, Andrew (later played regularly by twins William and Michael Cannon from 1983 to 1984). For the third season, April departs and the character of Muriel's mother, Iris Martin (Audrey Meadows) is added in order to help take care of Andrew. Also that fall, Jackie becomes engaged to her steady boyfriend, police officer Brad Turner (played by Gary Dontzig in one episode and by Jordan Suffin thereafter), but they broke it off after a short time. Jackie eventually moved into the field of fashion design, taking courses and producing her own clothing templates, which she later had produced as "The Jacqueline Rush Collection." Sara, meanwhile, decided to major in communications and, while continuing her studies, became a weather girl for a time at a major San Francisco TV station. Monroe seemed to be detached from Sara's circle of friends, but was taking the same major as her and became a security guard around campus. The character of Henry Rush became famous for wearing sweatshirts from various American colleges and universities. It was revealed in one episode that he wore the different sweatshirts because he himself had never gone to college. Eventually fans would send in sweatshirts from universities around the country hoping they would be used during taping. In the fall of 1982, ABC moved the series to Thursday nights, which proved to be disastrous. Paired with failures such as Joanie Loves Chachi, Star of the Family and It Takes Two, Too Close For Comfort saw its ratings fall drastically. At the conclusion of the season, the network cancelled the series, after dropping from #6 the previous season to #38. 
ABC broadcast the last first-run episode broadcast on May 5, 1983, as a pilot for a proposed spin-off series called Family Business. The series was to have focused on the misadventures of Lucille Garabaldi (Lainie Kazan) and her two sons (played by George Deloy and Jimmy Baio) as they tried to run a construction business. Hillary Bailey Smith was also featured in this backdoor pilot as the new, attractive female foreman that Lucille hired for her sons. ABC aired reruns of Too Close for Comfort at 11:00 am ET from June 27 to September 23, 1983. First-run syndication During the early 1980s, TV station owner Metromedia was expanding its portfolio of original syndicated programming through its production subsidiary, Metromedia Producers Corporation. Its efforts would eventually lead to the creation of the Fox Broadcasting Company. When Too Close for Comfort was canceled by ABC, Metromedia Producers Corporation elected to pick up the series and began producing all-new episodes to run on various stations throughout the country. Starting in April 1984, a total of 23 new episodes were broadcast for the show's fourth season, featuring the same cast as seen on the ABC episodes. Monroe and Iris were still around to bother Henry (although Meadows had cut back her involvement to guest shots only, so her character moves back to Chicago in the season premiere) and Jackie and Sara were still downstairs. The girls continued to advance in the respective career paths; Sara auditioned for a news anchor position at the TV station, but was passed over in favor of a female candidate who may have not had Sara's looks, but had greater experience in hard news. This caused Sara to learn the valuable lesson that her sex appeal alone would not get her everywhere. Monroe eventually moves into a remodeled attic, with the entrance from the Rushes' kitchen. Henry agreed to have Monroe as a tenant in a fleeting moment of compassion, but Monroe still proved to be a constant annoyance to him. 
The show's ratings improved in syndication and Metromedia ordered an additional 30 episodes, airing through November 1985. When the fifth season began, a single child actor, Joshua Goodwin, took over the role of Andrew Rush (which he would hold for the remainder of the series). Henry was now working out of his own fancy office at Wainwright Publishing, as a result of toddler Andrew's "terrible twos" behavior interfering with his concentration at home. Everyone else's worldly or, in the case of Monroe, wacky affairs were also proving to be an intense distraction, considering they were all living under the same roof. Near the end of the season, Jackie accepted a job offer in Italy that would help further her clothing line, with her family and friends giving her a big send-off. With a total of 107 episodes of Too Close for Comfort having been produced, the show became a popular staple for syndicated reruns throughout the late 1980s. The Ted Knight Show In late 1985, several changes were made before further episodes were produced. The show's title was changed to The Ted Knight Show (not to be confused with the short-lived 1978 CBS show of the same name; hence it was occasionally referred to as The New Ted Knight Show, such as when Jim J. Bullock made a guest appearance on Break the Bank) and the setting was changed to Marin County, north of San Francisco. A new arrangement of Johnny Mandel's theme song was recorded, and a new opening title sequence was shot in the surrounding area. Deborah Van Valkenburgh, Lydia Cornell, and Audrey Meadows left the cast (Meadows would make one guest appearance this season). Veteran actress Pat Carroll, as well as Lisa Antille were added to the cast along with returning Nancy Dussault, and Jim J. Bullock. The Rushes had moved to a larger house near Mill Valley. Henry retired from cartooning and became editor of the Marin Bugler, a local newspaper. 
Henry purchased a 49% stake in the publication from Hope Stinson (Carroll), who retained the other 51% and proved to be a thorn in his side. Muriel began working as the paper's staff photographer. Monroe, now living in his own apartment, visited frequently, and worked as a reporter-in-training at the Bugler. The Rushes hired a live-in nanny/housekeeper, a young woman named Lisa Flores (Antille), who would later become involved with Monroe. Antille had made a guest appearance in a fifth season Too Close for Comfort episode as Yvonne, a housekeeper the Rushes employed until she attempted to marry Monroe to avoid deportation. First-run episodes of The Ted Knight Show were broadcast starting in April 1986. Twenty-two episodes were produced prior to the summer of 1986 and twelve had aired by mid-July. The revamped show continued to be successful and was scheduled to resume production for another season but Ted Knight, who had been battling colon cancer since 1985, died on August 26, 1986 at the age of 62 and no further episodes would be produced. The ten remaining first-run episodes were broadcast from September 1986 to February 1987. When the episodes of The Ted Knight Show were added to the rerun package of Too Close for Comfort, the original show's title graphic was used, but the updated opening theme and sequence remained unchanged.
POSSIBLE IMPACTS OF RELATIVE SEA LEVEL RISE IN THE COASTAL AREAS IN CHINA Why should we focus on relative sea level rise? Is it more important than theoretical sea level rise? You can find answers in this paper. In addition, the paper provides a prediction of relative sea level rise in the coastal areas of China on the basis of the global sea level rise value drawn up by the IPCC in 1992. The relative sea level will rise in the coming decades, but to a different degree in different areas. Its impacts constitute a serious menace to the sustained development of coastal areas, as follows: coastal erosion, and the length of coastline subject to erosion, may increase; acceleration and increasing frequency of storm surges; more and more coastal lowland inundation; salinity intrusion and quality degradation of drinking water; and decrease of the function of coastal defense projects.
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use bytes::{BufMut, Bytes, BytesMut};
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use futures::{Future, Sink};
use slog::{debug, warn, Logger};
use tokio::runtime::current_thread::spawn;

use bioyino_metric::parser::{MetricParser, ParseErrorHandler};
use bioyino_metric::Metric;

use crate::aggregate::AggregateOptions;
use crate::config::System;
use crate::{Cache, Float, AGG_ERRORS, DROPS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS};

/// Everything an aggregation step needs: a scratch buffer for building metric
/// names, the base metric name, the metric itself, aggregation options and the
/// channel over which (name, value) pairs are sent back.
#[derive(Debug)]
pub struct AggregateData {
    pub buf: BytesMut,
    pub name: Bytes,
    pub metric: Metric<Float>,
    pub options: AggregateOptions,
    pub response: UnboundedSender<(Bytes, Float)>,
}

/// Unit of work processed by `TaskRunner::run`.
#[derive(Debug)]
pub enum Task {
    /// Raw bytes received from a peer identified by a numeric key (see
    /// `buffers`); leftover unparsed bytes are kept between calls.
    Parse(u64, BytesMut),
    /// Insert/aggregate a single metric into the short cache.
    AddMetric(Bytes, Metric<Float>),
    /// Insert/aggregate a batch of metrics into the short cache.
    AddMetrics(Vec<(Bytes, Metric<Float>)>),
    /// Insert/aggregate a batch of metrics into the long cache.
    AddSnapshot(Vec<(Bytes, Metric<Float>)>),
    /// Send a copy of the short cache back and merge it into the long cache.
    TakeSnapshot(oneshot::Sender<Cache>),
    /// Send the long cache back and clear it; also ages per-peer parse buffers.
    Rotate(oneshot::Sender<Cache>),
    /// Run the aggregation step (see `aggregate_task`).
    Aggregate(AggregateData),
}

/// Merge `metric` into `cache` under `name`: aggregate into an existing entry
/// (counting failures in AGG_ERRORS) or insert it as a new one.
fn update_metric(cache: &mut Cache, name: Bytes, metric: Metric<Float>) {
    match cache.entry(name) {
        Entry::Occupied(ref mut entry) => {
            entry.get_mut().aggregate(metric).unwrap_or_else(|_| {
                AGG_ERRORS.fetch_add(1, Ordering::Relaxed);
            });
        }
        Entry::Vacant(entry) => {
            entry.insert(metric);
        }
    };
}

/// Owns the metric caches and per-peer parse buffers; processes `Task`s
/// sequentially (single-threaded, so no locking is needed).
#[derive(Debug)]
pub struct TaskRunner {
    // "long" cache: survives snapshots, emptied only on Rotate
    long: HashMap<Bytes, Metric<Float>>,
    // "short" cache: emptied into `long` on every TakeSnapshot
    short: HashMap<Bytes, Metric<Float>>,
    // per-peer leftover bytes; the usize counts Rotate cycles since last use
    buffers: HashMap<u64, (usize, BytesMut)>,
    config: Arc<System>,
    log: Logger,
}

impl TaskRunner {
    /// Create a runner with all maps pre-sized to `cap`.
    pub fn new(log: Logger, config: Arc<System>, cap: usize) -> Self {
        Self { long: HashMap::with_capacity(cap), short: HashMap::with_capacity(cap), buffers: HashMap::with_capacity(cap), config, log }
    }

    /// Dispatch a single task. See `Task` for per-variant semantics.
    pub fn run(&mut self, task: Task) {
        match task {
            Task::Parse(addr, buf) => {
                // only clone the logger when parse-error logging is enabled
                let log = if self.config.metrics.log_parse_errors { Some(self.log.clone()) } else { None };
                let buf = {
                    let len = buf.len();
                    // append incoming bytes to this peer's leftover buffer,
                    // resetting its idle-age counter to zero
                    let (_, ref mut prev_buf) = self
                        .buffers
                        .entry(addr)
                        .and_modify(|(times, _)| {
                            *times = 0;
                        })
                        .or_insert((0, BytesMut::with_capacity(len)));
                    prev_buf.reserve(buf.len());
                    prev_buf.put(buf);
                    prev_buf
                };
                // the parser consumes what it can and leaves the tail in `buf`
                let parser = MetricParser::new(buf, self.config.metrics.max_unparsed_buffer, TaskParseErrorHandler(log));
                for (name, metric) in parser {
                    INGRESS_METRICS.fetch_add(1, Ordering::Relaxed);
                    update_metric(&mut self.short, name, metric);
                }
            }
            Task::AddMetric(name, metric) => update_metric(&mut self.short, name, metric),
            Task::AddMetrics(mut list) => {
                // `.last()` just drives the lazy iterator to completion
                list.drain(..).map(|(name, metric)| update_metric(&mut self.short, name, metric)).last();
            }
            Task::AddSnapshot(mut list) => {
                // snapshots go to long cache to avoid being duplicated to other nodes
                list.drain(..).map(|(name, metric)| update_metric(&mut self.long, name, metric)).last();
            }
            Task::TakeSnapshot(channel) => {
                // clone short cache for further sending
                let short = self.short.clone();
                // join short cache to long cache removing data from short
                {
                    let mut long = &mut self.long; // self.long cannot be borrowed in map, so we borrow it earlier
                    self.short.drain().map(|(name, metric)| update_metric(&mut long, name, metric)).last();
                }
                // self.short now contains empty hashmap because of draining
                // give a copy of snapshot to requestor
                channel.send(short).unwrap_or_else(|_| {
                    PEER_ERRORS.fetch_add(1, Ordering::Relaxed);
                    // NOTE(review): "shapshot" is a typo in the log message;
                    // left untouched because it is a runtime string
                    debug!(self.log, "shapshot not sent");
                });
            }
            Task::Rotate(channel) => {
                // hand the whole long cache to the requestor and start fresh
                let rotated = self.long.clone();
                self.long.clear();
                let log = self.log.clone();
                channel.send(rotated).unwrap_or_else(|_| {
                    debug!(log, "rotated data not sent");
                    DROPS.fetch_add(1, Ordering::Relaxed);
                });
                // age per-peer buffers; drop any unused for 5 rotations
                self.buffers.retain(|_, (ref mut times, _)| {
                    *times += 1;
                    *times < 5
                });
            }
            Task::Aggregate(data) => aggregate_task(data),
        }
    }

    // used in tests in peer.rs
    pub fn get_long_entry(&self, e: &Bytes) -> Option<&Metric<Float>> {
        self.long.get(e)
    }
    pub fn get_short_entry(&self, e: &Bytes) -> Option<&Metric<Float>> {
        self.short.get(e)
    }
}

/// Expand one metric into its aggregate series and push each (name, value)
/// pair into `data.response`. Optionally emits an extra "update counter"
/// metric (prefix.name.suffix) when the configured threshold is exceeded.
pub fn aggregate_task(data: AggregateData) {
    let AggregateData { mut buf, name, metric, options, response } = data;
    let upd = if let Some(options) = options.update_counter {
        if metric.update_counter > options.threshold {
            // + 2 is for dots
            let cut_len = options.prefix.len() + name.len() + options.suffix.len() + 2;
            buf.reserve(cut_len);
            if options.prefix.len() > 0 {
                buf.put_slice(&options.prefix);
                buf.put_slice(b".");
            }
            buf.put_slice(&name);
            if options.suffix.len() > 0 {
                buf.put_slice(b".");
                buf.put_slice(&options.suffix);
            }
            // take() detaches the written bytes, leaving buf reusable below
            let counter = buf.take().freeze();
            Some((counter, metric.update_counter.into()))
        } else {
            None
        }
    } else {
        None
    };

    metric
        .into_iter()
        .map(move |(suffix, value)| {
            // build "<name><suffix>" for each aggregate produced by the metric
            buf.extend_from_slice(&name);
            buf.extend_from_slice(suffix.as_bytes());
            let name = buf.take().freeze();
            (name, value)
        })
        .chain(upd)
        .map(|data| {
            // each send is spawned as its own future on the current-thread runtime
            spawn(
                response
                    .clone()
                    .send(data)
                    .map_err(|_| {
                        AGG_ERRORS.fetch_add(1, Ordering::Relaxed);
                    })
                    .map(|_| ()),
            );
        })
        .last();
}

/// Counts parse errors and, when constructed with a logger, logs the
/// offending buffer and position.
struct TaskParseErrorHandler(Option<Logger>);

impl ParseErrorHandler for TaskParseErrorHandler {
    fn handle(&self, input: &[u8], pos: usize) {
        PARSE_ERRORS.fetch_add(1, Ordering::Relaxed);
        if let Some(ref log) = self.0 {
            if let Ok(string) = std::str::from_utf8(input) {
                warn!(log, "parsing error"; "buffer"=> format!("{:?}", string), "position"=>format!("{}", pos));
            } else {
                // buffer is not valid UTF-8; log the raw bytes instead
                warn!(log, "parsing error (bad unicode)"; "buffer"=> format!("{:?}", input), "position"=>format!("{}", pos));
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use metric::MetricType;

    use crate::util::prepare_log;

    #[test]
    fn parse_trashed_metric_buf() {
        // garbage interleaved with two valid gauge updates; the parser must
        // recover after each bad line
        let mut data = BytesMut::new();
        data.extend_from_slice(b"trash\ngorets1:+1000|g\nTRASH\ngorets2:-1000|g|@0.5\nMORETrasH\nFUUU");
        let mut config = System::default();
        config.metrics.log_parse_errors = true;
        let mut runner = TaskRunner::new(prepare_log("parse_trashed"), Arc::new(config), 16);
        runner.run(Task::Parse(2, data));
        let key: Bytes = "gorets1".into();
        let metric = runner.short.get(&key).unwrap().clone();
        assert_eq!(metric.value, 1000f64);
        assert_eq!(metric.mtype, MetricType::Gauge(Some(1i8)));
        assert_eq!(metric.sampling, None);
        let key: Bytes = "gorets2".into();
        let metric = runner.short.get(&key).unwrap().clone();
        assert_eq!(metric.value, 1000f64);
        assert_eq!(metric.mtype, MetricType::Gauge(Some(-1i8)));
        assert_eq!(metric.sampling, Some(0.5f32));
    }
}
import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
# from scipy.spatial import distance_matrix
import plotly.graph_objects as go
from peaksIdentification.postprocessing import sliding_avg
import numpy as np


def plotplot(spettro_old, spettro_new, associations):
    # Plot two NMR-like spectra as scatter traces plus one line per peak
    # association (purple when the peak keeps its name, red when it moved).
    # NOTE: the figure is shown twice — once with only the scatter traces and
    # once again after the association lines are added.
    print(associations)
    print(len(associations))
    # NOTE(review): the string below was meant as the docstring but sits after
    # two statements, so it is just a no-op expression; left in place.
    """
    :param spettro_old: dataframe
    :param spettro_new: dataframe
    :param associations: [('k_old_1', 'k_new_1', d), ('k_old_2', 'k_new_2', d), ..., ('k_old_N', 'k_new_N', d),]
    """
    # .dd is assumed to be a 2-column DataFrame of peak coordinates indexed by
    # peak name — TODO confirm against the Spectrum class
    X = spettro_old.dd.to_numpy()
    keys_X = spettro_old.dd.index.tolist()
    Y = spettro_new.dd.to_numpy()
    keys_Y = spettro_new.dd.index.tolist()
    fig = go.Figure()
    fig.update_layout(width=1300, height=1000)
    # NMR convention: both axes run high-to-low
    fig['layout']['xaxis']['autorange'] = "reversed"
    fig['layout']['yaxis']['autorange'] = "reversed"
    fig.add_trace(
        go.Scatter(
            mode='markers+text',
            x=X[:, 0],
            y=X[:, 1]*0.2,  # y scaled by 0.2 — presumably to compress the second dimension; verify
            marker=dict(
                color='LightSkyBlue',
                size=4,
                line=dict(
                    color='MediumPurple',
                    width=1
                )
            ),
            name='Spectra1',
            text=keys_X,
            textposition="bottom center"
        ))
    fig.add_trace(
        go.Scatter(
            mode='markers+text',
            x=Y[:, 0],
            y=Y[:, 1]*0.2,
            marker=dict(
                color='Coral',
                size=4,
                line=dict(
                    color='MediumPurple',
                    width=1
                )
            ),
            name='Spectra2',
            text=keys_Y,
            textposition="bottom center"
        ))
    fig.show()
    hist = []   # association distances
    histn = []  # old peak names
    histi = []  # numeric part of each old peak name
    for triple in associations:
        # print(triple)
        key_old = triple[0]
        key_new = triple[1]
        dist = triple[2]
        hist.append(dist)
        histn.append(key_old)
        # extract the residue number embedded in the peak name
        strnkey1 = ''.join(char for char in key_old if char.isnumeric())
        histi.append(int(strnkey1))
        old_p_xy = spettro_old.dd.loc[key_old].to_numpy(dtype=float)
        new_p_xy = spettro_new.dd.loc[key_new].to_numpy(dtype=float)
        # euclidean distance recomputed from coordinates (currently unused)
        ddist = np.sqrt(((old_p_xy - new_p_xy)**2).sum())
        #print("??", old_p_xy, new_p_xy, ddist)
        if key_old == key_new:
            color = "MediumPurple"
        else:
            color = "red"
        fig.add_trace(go.Scatter(x=[old_p_xy[0], new_p_xy[0]],
                                 y=[old_p_xy[1]*0.2, new_p_xy[1]*0.2],
                                 mode='lines',
                                 showlegend=False,
                                 text='provaaa',
                                 line=dict(color=color)))
    fig.show()
    '''
    df1 = pd.DataFrame({"DistanceC": hist, "Index":histi, "Name":histn})
    df1 = df1.sort_values(by=['Index'])
    print(df1)
    fig2 = px.bar(df1, x = "Name", y="DistanceC")
    fig2.show()
    '''


def plotHistogram(df1, real_dist_dict=None):
    # Bar chart of estimated shift distances per peak (blue), optionally
    # overlaid with the real distances (light gray) and a marker+line trace of
    # the precomputed sliding-window average from df1['window_avg'].
    #print("REAL DIST DICT ", real_dist_dict)
    # real distance dictionary
    #print("SIZE REAL DIST DICT ", len(real_dist_dict))
    #print("= = = = =>\n",df1)
    #distances = df1['Distance'].tolist()
    #sl_avg = sliding_avg(distances, half_window_size=3)
    # sliding window avg on our estimated shift distances
    #print(sl_avg)
    #print(len(df1['Name'].tolist()), df1['Name'].tolist())
    #print(len(df1['Index'].tolist()), df1['Index'].tolist())
    #print("==============")
    #df1['window_avg'] = sl_avg
    sl_avg = df1['window_avg']
    # plot the histogram
    #fig2 = px.bar(df1, x = "Name", y="Distance")
    fig2 = go.Figure()
    fig2.add_trace(go.Bar(
        x=df1["Name"],
        y=df1["Distance"],
        name='DistanceGG',
        marker_color='blue'
    ))
    if real_dist_dict is not None:
        #fig2 = px.bar(df1, x="Name", y="Real_dist")
        fig2.add_trace(go.Bar(
            x=df1["Name"],
            y=df1["Real_dist"],
            name='Distance real',
            marker_color='lightgray'
        ))
    # plot the marker showing the moving-average value
    fig2.add_trace(
        go.Scatter(
            mode='markers+text+lines',
            x=df1['Name'],
            y=sl_avg,
            marker=dict(
                color='Coral',
                size=4,
                line=dict(color='MediumPurple',width=1
                          )
            ),
            name='Window avg',
            text='',
            textposition="bottom center"
        ))
    fig2.show()
    #return sl_avg, df1['Name'].tolist(), df1['Index'].tolist()
import java.io.*;
import java.util.*;

/**
 * Reads a number of watched intervals and a fixed skip size, then computes the
 * total minutes actually watched: between intervals the viewer can only jump
 * forward in multiples of the skip size without overshooting the next start.
 *
 * Input: N SKIP, followed by N pairs "start end". Output: total watched time.
 */
public class movie {
    public static void main(String[] args) throws IOException {
        final Scanner in = new Scanner(System.in);
        final int intervals = in.nextInt(); // total entries
        final int skip = in.nextInt();      // fast-forward step
        int watched = 0;                    // accumulated watch time
        int position = 0;                   // current playback position
        for (int idx = 0; idx < intervals; idx++) {
            final int start = in.nextInt(); // interval lower bound
            final int end = in.nextInt();   // interval upper bound
            // jump forward in skip-sized steps while we stay strictly before start
            while (position + skip < start) {
                position += skip;
            }
            // watch from wherever we landed up to the interval's end
            watched += end - position;
            position = end;
        }
        System.out.println(watched);
    }
}
def for_T():
    """Print a capital letter 'T' on a 7x5 grid of '*' cells using for loops.

    Each cell is followed by a single space; the top row and the middle
    column are stars, everything else is blank.
    """
    rows, cols = 7, 5
    for r in range(rows):
        # build the whole row, then emit it with its trailing newline
        line = "".join(("*" if r == 0 or c == 2 else " ") + " " for c in range(cols))
        print(line)


def while_T():
    """Print the same capital 'T' pattern as for_T, but driven by while loops."""
    r = 0
    while r < 7:
        c = 0
        line = ""
        while c < 5:
            # star on the top row and the centre column, blank elsewhere
            line += ("*" if r == 0 or c == 2 else " ") + " "
            c += 1
        r += 1
        print(line)
// Test observable with identity pauli operator because calculation was unstable // in this situation. TEST(ObservableTest, MinimumEigenvalueByArnoldiMethodWithIdentity) { constexpr double eps = 1e-6; constexpr UINT test_count = 10; Random random; for (UINT i = 0; i < test_count; i++) { const auto qubit_count = random.int32() % 4 + 3; const auto operator_count = random.int32() % 10 + 2; auto observable = Observable(qubit_count); observable.add_random_operator(operator_count); add_identity(&observable, random); test_eigenvalue(observable, 70, eps, CalculationMethod::ArnoldiMethod); } }
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import json

from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse

from bridge.utils import KleverTestCase, ArchiveFileContent
from bridge.vars import (
    SAFE_VERDICTS, UNSAFE_VERDICTS, MARK_SAFE, MARK_UNSAFE, MARK_STATUS, PROBLEM_DESC_FILE, ASSOCIATION_TYPE
)

from users.models import User
from jobs.models import Job
from reports.models import ReportSafe, ReportUnsafe, ReportUnknown, ReportComponent
from marks.models import (
    MarkSafe, MarkUnsafe, MarkUnknown, MarkSafeHistory, MarkUnsafeHistory, MarkUnknownHistory,
    SafeTag, UnsafeTag, MarkSafeTag, MarkUnsafeTag, MarkSafeReport, MarkUnsafeReport, MarkUnknownReport,
    SafeAssociationLike, UnsafeAssociationLike, UnknownAssociationLike
)

from reports.test import DecideJobs, SJC_1

# Directory with report archives used as test fixtures.
REPORT_ARCHIVES = os.path.join(settings.BASE_DIR, 'reports', 'test_files')

# NOTE(review): populate_users, JOB_STATUS, MARK_TYPE, ReportSafeTag and
# SafeReportTag are referenced below but their imports are not visible in this
# chunk — presumably trimmed from view; confirm against the full file.


class TestMarks(KleverTestCase):
    def setUp(self):
        # Prepare a fully decided job so mark tests have reports to work with:
        # create users, log in as manager, populate defaults, start and decide
        # the first job, and name the temp files used by download/upload tests.
        super(TestMarks, self).setUp()
        User.objects.create_superuser('superuser', '', 'top_secret')
        populate_users(
            manager={'username': 'manager', 'password': '<PASSWORD>'},
            service={'username': 'service', 'password': '<PASSWORD>'}
        )
        self.client.post(reverse('users:login'), {'username': 'manager', 'password': '<PASSWORD>'})
        self.client.post(reverse('population'))
        self.job = Job.objects.all().first()
        self.assertIsNotNone(self.job)
        self.client.post('/jobs/run_decision/%s/' % self.job.pk, {'mode': 'default', 'conf_name': 'development'})
        DecideJobs('service', 'service', SJC_1)
        self.safe_archive = 'test_safemark.zip'
        self.unsafe_archive = 'test_unsafemark.zip'
        self.unknown_archive = 'test_unknownmark.zip'
        self.test_tagsfile = 'test_tags.json'
        self.all_marks_arch = 'All-marks.zip'

    def test_safe(self):
        # End-to-end lifecycle of safe tags and safe marks: tag CRUD and
        # upload/download, mark creation/editing/versioning, report verdict
        # propagation, association confirm/like flows, and bulk mark
        # download/upload.
        self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0])

        # Delete populated marks
        response = self.client.post('/marks/delete/', {
            'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True)))
        })
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/marks/delete/', {
            'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True)))
        })
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/marks/delete/', {
            'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True)))
        })
        self.assertEqual(response.status_code, 200)

        # Create 5 safe tags (each one a child of the previous)
        created_tags = []
        response = self.client.post('/marks/tags/save_tag/', {
            'action': 'create', 'tag_type': 'safe', 'parent_id': '0',
            'name': 'test:safe:tag:1', 'description': 'Test safe tag description'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        try:
            created_tags.append(SafeTag.objects.get(tag='test:safe:tag:1'))
        except ObjectDoesNotExist:
            self.fail('Safe tag was not created')
        self.assertEqual(created_tags[0].description, 'Test safe tag description')
        self.assertEqual(created_tags[0].parent, None)
        for i in range(2, 6):
            self.client.post('/marks/tags/save_tag/', {
                'action': 'create', 'tag_type': 'safe', 'parent_id': created_tags[i - 2].pk,
                'name': 'test:safe:tag:%s' % i, 'description': ''
            })
            created_tags.append(SafeTag.objects.get(tag='test:safe:tag:%s' % i))
            self.assertEqual(created_tags[i - 1].parent, created_tags[i - 2])

        # Get tag parents for editing tag 'test:safe:tag:3'
        response = self.client.post('/marks/tags/safe/get_tag_data/', {'tag_id': created_tags[2].pk})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

        # Get tag parents for creating new tag
        response = self.client.post('/marks/tags/safe/get_tag_data/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

        # Edit 5th tag: reparent it under tag 3 and give it a description
        response = self.client.post('/marks/tags/save_tag/', {
            'action': 'edit', 'tag_type': 'safe', 'parent_id': created_tags[2].pk,
            'name': 'test:safe:tag:5', 'tag_id': created_tags[4].pk,
            'description': 'Test safe tag 5 description'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        try:
            created_tags[4] = SafeTag.objects.get(tag='test:safe:tag:5')
        except ObjectDoesNotExist:
            self.fail('Tag 5 was not found after editing')
        self.assertEqual(created_tags[4].parent, created_tags[2])
        self.assertEqual(created_tags[4].description, 'Test safe tag 5 description')

        # Remove 3d tag and check that its children (tag4 and tag5) are also removed
        response = self.client.post('/marks/tags/safe/delete/%s/' % created_tags[2].pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(
            SafeTag.objects.filter(tag__in=['test:safe:tag:3', 'test:safe:tag:4', 'test:safe:tag:5']).count(), 0
        )
        del created_tags[2:]

        # Get tags data (for edit/create mark page). Just check that there is no error in response.
        response = self.client.post('/marks/safe/tags_data/', {'selected_tags': json.dumps([created_tags[1].pk])})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))

        # Download tags
        response = self.client.get(reverse('marks:download_tags', args=['safe']))
        self.assertEqual(response.status_code, 200)
        with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='wb') as fp:
            for chunk in response.streaming_content:
                fp.write(chunk)
        SafeTag.objects.all().delete()

        # Upload tags
        with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='rb') as fp:
            response = self.client.post('/marks/tags/safe/upload/', {'file': fp})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        for i in range(0, len(created_tags)):
            try:
                created_tags[i] = SafeTag.objects.get(tag=created_tags[i].tag)
            except ObjectDoesNotExist:
                self.fail("Tags weren't uploaded")

        # Tags tree page
        response = self.client.get(reverse('marks:tags', args=['safe']))
        self.assertEqual(response.status_code, 200)

        # Get report
        safe = ReportSafe.objects.filter(root__job_id=self.job.pk).first()
        self.assertIsNotNone(safe)

        # Inline mark form
        response = self.client.get('/marks/safe/%s/create/inline/' % safe.id)
        self.assertEqual(response.status_code, 200)

        # Create mark page
        response = self.client.get(reverse('marks:mark_form', args=['safe', safe.pk, 'create']))
        self.assertEqual(response.status_code, 200)

        # Save mark
        compare_attrs = list({'is_compare': associate, 'attr': a_name}
                             for a_name, associate in safe.attrs.values_list('attr__name__name', 'associate'))
        response = self.client.post(reverse('marks:mark_form', args=['safe', safe.pk, 'create']), {
            'data': json.dumps({
                'description': 'Mark description', 'is_modifiable': True,
                'verdict': MARK_SAFE[1][0], 'status': MARK_STATUS[2][0],
                'tags': [created_tags[1].pk], 'attrs': compare_attrs
            })
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        res = json.loads(str(response.content, encoding='utf8'))
        self.assertIsNone(res.get('error'))
        self.assertIn('cache_id', res)
        cache_id = res['cache_id']

        # Check mark's tables: mark, its single version, attrs, report verdict,
        # association and tag caches (tag 2 implies its parent tag 1)
        try:
            mark = MarkSafe.objects.get(job=self.job, author__username='manager')
        except ObjectDoesNotExist:
            self.fail('Mark was not created')
        self.assertEqual(mark.type, MARK_TYPE[0][0])
        self.assertEqual(mark.verdict, MARK_SAFE[1][0])
        self.assertEqual(mark.status, MARK_STATUS[2][0])
        self.assertEqual(mark.version, 1)
        self.assertEqual(mark.description, 'Mark description')
        self.assertEqual(mark.is_modifiable, True)
        self.assertEqual(len(mark.versions.all()), 1)
        mark_version = MarkSafeHistory.objects.get(mark=mark)
        self.assertEqual(mark_version.verdict, mark.verdict)
        self.assertEqual(mark_version.version, 1)
        self.assertEqual(mark_version.author.username, 'manager')
        self.assertEqual(mark_version.status, mark.status)
        self.assertEqual(mark_version.description, mark.description)
        for mark_attr in mark_version.attrs.all():
            self.assertIn({'is_compare': mark_attr.is_compare, 'attr': mark_attr.attr.name.name}, compare_attrs)
        self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[1][0])
        self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[1][0]).count(), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 1)
        try:
            rst = ReportSafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[0])
            self.assertEqual(rst.number, 1)
            rst = ReportSafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[1])
            self.assertEqual(rst.number, 1)
            rst = ReportSafeTag.objects.get(report__root__job=self.job, report_id=safe.parent_id, tag=created_tags[0])
            self.assertEqual(rst.number, 1)
            rst = ReportSafeTag.objects.get(report__root__job=self.job, report__id=safe.parent_id, tag=created_tags[1])
            self.assertEqual(rst.number, 1)
            srt = SafeReportTag.objects.get(report=safe, tag=created_tags[0])
            self.assertEqual(srt.number, 1)
            srt = SafeReportTag.objects.get(report=safe, tag=created_tags[1])
            self.assertEqual(srt.number, 1)
        except ObjectDoesNotExist:
            self.fail('Reports tags cache was not filled')

        # Associations changes
        response = self.client.get('/marks/safe/association_changes/%s/' % cache_id)
        self.assertEqual(response.status_code, 200)

        # Edit mark page
        response = self.client.get(reverse('marks:mark_form', args=['safe', mark.pk, 'edit']))
        self.assertEqual(response.status_code, 200)

        # Edit mark: new verdict, only tag 1, with a version comment
        response = self.client.post(reverse('marks:mark_form', args=['safe', mark.pk, 'edit']), {
            'data': json.dumps({
                'description': 'New mark description', 'is_modifiable': True,
                'verdict': MARK_SAFE[2][0], 'status': MARK_STATUS[2][0],
                'tags': [created_tags[0].pk], 'attrs': compare_attrs, 'comment': 'Change 1'
            })
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        res = json.loads(str(response.content, encoding='utf8'))
        self.assertIsNone(res.get('error'))
        self.assertIn('cache_id', res)
        cache_id = res['cache_id']

        # Check mark's tables after the edit (version 2)
        try:
            mark = MarkSafe.objects.get(job=self.job, author__username='manager')
        except ObjectDoesNotExist:
            self.fail('Mark was not created')
        self.assertEqual(mark.verdict, MARK_SAFE[2][0])
        self.assertEqual(mark.version, 2)
        self.assertEqual(mark.description, 'New mark description')
        self.assertEqual(mark.is_modifiable, True)
        self.assertEqual(len(mark.versions.all()), 2)
        mark_version = MarkSafeHistory.objects.filter(mark=mark).order_by('-version').first()
        self.assertEqual(mark_version.version, 2)
        self.assertEqual(mark_version.verdict, mark.verdict)
        self.assertEqual(mark_version.author.username, 'manager')
        self.assertEqual(mark_version.description, mark.description)
        self.assertEqual(mark_version.comment, 'Change 1')
        self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[2][0])
        self.assertEqual(len(MarkSafeReport.objects.filter(mark=mark, report=safe)), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 0)
        self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 1)
        self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__id=safe.parent_id)), 1)
        try:
            srt = SafeReportTag.objects.get(report=safe, tag=created_tags[0])
            self.assertEqual(srt.number, 1)
        except ObjectDoesNotExist:
            self.fail('Reports tags cache was not filled')
        self.assertEqual(len(SafeReportTag.objects.filter(report=safe, tag=created_tags[1])), 0)

        # Associations changes
        response = self.client.get('/marks/safe/association_changes/%s/' % cache_id)
        self.assertEqual(response.status_code, 200)

        # Safe marks list page
        response = self.client.get(reverse('marks:list', args=['safe']))
        self.assertEqual(response.status_code, 200)
        response = self.client.get(reverse('marks:mark', args=['safe', mark.id]))
        self.assertEqual(response.status_code, 200)

        # Inline mark form
        response = self.client.get('/marks/safe/%s/edit/inline/' % mark.id)
        self.assertEqual(response.status_code, 200)

        # Confirm/unconfirm association
        # Mark is automatically associated after its changes
        self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[0][0]).count(), 1)
        response = self.client.post('/marks/association/safe/%s/%s/unconfirm/' % (safe.pk, mark.pk))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[2][0]).count(), 1)
        response = self.client.post('/marks/association/safe/%s/%s/confirm/' % (safe.pk, mark.pk))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(MarkSafeReport.objects.filter(mark=mark, report=safe, type=ASSOCIATION_TYPE[1][0]).count(), 1)

        # Like/dislike association (a dislike replaces a previous like)
        response = self.client.post('/marks/association/safe/%s/%s/like/' % (safe.id, mark.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(SafeAssociationLike.objects.filter(
            association__report=safe, association__mark=mark, dislike=False
        ).count(), 1)
        response = self.client.post('/marks/association/safe/%s/%s/dislike/' % (safe.id, mark.id))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(SafeAssociationLike.objects.filter(
            association__report=safe, association__mark=mark, dislike=True
        ).count(), 1)
        self.assertEqual(SafeAssociationLike.objects.filter(
            association__report=safe, association__mark=mark, dislike=False
        ).count(), 0)

        # Download mark
        response = self.client.get(reverse('marks:safe-download', args=[mark.pk]))
        self.assertEqual(response.status_code, 200)
        self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'})
        with open(os.path.join(settings.MEDIA_ROOT, self.safe_archive), mode='wb') as fp:
            for content in response.streaming_content:
                fp.write(content)

        # Download mark in preset format
        response = self.client.get(reverse('marks:safe-download-preset', args=[mark.pk]))
        self.assertEqual(response.status_code, 200)

        # Delete mark
        response = self.client.post('/marks/delete/', {'type': 'safe', 'ids': json.dumps([mark.id])})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(len(MarkSafe.objects.all()), 0)
        self.assertEqual(len(MarkSafeReport.objects.all()), 0)
        self.assertEqual(ReportSafe.objects.all().first().verdict, SAFE_VERDICTS[4][0])

        # Upload mark (restores state equivalent to before the deletion)
        with open(os.path.join(settings.MEDIA_ROOT, self.safe_archive), mode='rb') as fp:
            response = self.client.post('/marks/upload/', {'file': fp})
            # NOTE(review): fp.close() inside "with" is redundant; harmless
            fp.close()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        res = json.loads(str(response.content, encoding='utf8'))
        self.assertIn('id', res)
        self.assertEqual(res.get('type'), 'safe')
        self.assertEqual(len(MarkSafe.objects.all()), 1)
        try:
            newmark = MarkSafe.objects.get(pk=res['id'])
        except ObjectDoesNotExist:
            self.fail('Mark was not uploaded')
        self.assertEqual(newmark.type, MARK_TYPE[2][0])
        self.assertEqual(newmark.verdict, MARK_SAFE[2][0])
        self.assertEqual(newmark.version, 2)
        self.assertEqual(newmark.description, 'New mark description')
        self.assertEqual(newmark.is_modifiable, True)
        self.assertEqual(len(newmark.versions.all()), 2)
        newmark_version = MarkSafeHistory.objects.filter(mark=newmark).order_by('-version').first()
        self.assertEqual(newmark_version.version, 2)
        self.assertEqual(newmark_version.verdict, mark.verdict)
        self.assertEqual(newmark_version.author.username, 'manager')
        self.assertEqual(newmark_version.description, mark.description)
        self.assertEqual(newmark_version.comment, 'Change 1')
        self.assertEqual(ReportSafe.objects.get(pk=safe.pk).verdict, SAFE_VERDICTS[2][0])
        self.assertEqual(len(MarkSafeReport.objects.filter(mark=newmark, report=safe)), 1)
        self.assertEqual(len(MarkSafeReport.objects.filter(report=safe)), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[0])), 1)
        self.assertEqual(len(MarkSafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[1])), 0)
        self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 1)
        self.assertEqual(len(ReportSafeTag.objects.filter(report__root__job=self.job, report__id=safe.parent_id)), 1)

        # Some more mark changes (versions 3..5)
        for i in range(3, 6):
            response = self.client.post(reverse('marks:mark_form', args=['safe', newmark.pk, 'edit']), {
                'data': json.dumps({
                    'description': 'New mark description', 'is_modifiable': True,
                    'verdict': MARK_SAFE[2][0], 'status': MARK_STATUS[2][0],
                    'tags': [created_tags[0].pk], 'attrs': compare_attrs,
                    'comment': 'Change %s' % i
                })
            })
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response['Content-Type'], 'application/json')
            self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(len(MarkSafeHistory.objects.filter(mark=newmark)), 5)

        # Get 3d version data
        response = self.client.get(reverse('marks:mark_form', args=['safe', newmark.pk, 'edit']), params={'version': 3})
        self.assertEqual(response.status_code, 200)

        # Compare 1st and 4th versions
        response = self.client.post('/marks/safe/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4})
        self.assertEqual(response.status_code, 200)

        # Remove 2nd and 4th versions
        response = self.client.post('/marks/safe/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        res = json.loads(str(response.content, encoding='utf8'))
        self.assertNotIn('error', res)
        self.assertIn('success', res)
        self.assertEqual(len(MarkSafeHistory.objects.filter(mark=newmark)), 3)

        # Reports' lists pages (302 is accepted: single-result lists redirect)
        root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None)
        response = self.client.get('%s?tag=%s' % (reverse('reports:safes', args=[root_comp.pk]), created_tags[0].pk))
        self.assertIn(response.status_code, {200, 302})
        response = self.client.get('%s?tag=%s' % (reverse('reports:safes', args=[root_comp.pk]), created_tags[1].pk))
        self.assertIn(response.status_code, {200, 302})
        response = self.client.get(
            '%s?verdict=%s' % (reverse('reports:safes', args=[root_comp.pk]), SAFE_VERDICTS[0][0])
        )
        self.assertIn(response.status_code, {200, 302})
        response = self.client.get(
            '%s?verdict=%s' % (reverse('reports:safes', args=[root_comp.pk]), SAFE_VERDICTS[2][0])
        )
        self.assertIn(response.status_code, {200, 302})

        # Download all marks
        response = self.client.get('/marks/api/download-all/')
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response['Content-Type'], 'application/json')
        with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp:
            for content in response.streaming_content:
                fp.write(content)

        # Delete all safe marks
        self.client.post('/marks/delete/', {
            'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True)))
        })
        self.assertEqual(MarkSafe.objects.count(), 0)
        # All verdicts must be "safe unmarked"
        self.assertEqual(
            len(ReportSafe.objects.filter(verdict=SAFE_VERDICTS[4][0])),
            len(ReportSafe.objects.all())
        )
        self.assertEqual(len(MarkSafeReport.objects.all()), 0)

        # Upload all marks
        with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp:
            response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
        self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['fail']), 0)
        self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['safe']), 1)

    def test_unsafe(self):
self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0]) # Delete populated marks response = self.client.post('/marks/delete/', { 'type': 'safe', 'ids': json.dumps(list(MarkSafe.objects.values_list('id', flat=True))) }) self.assertEqual(response.status_code, 200) response = self.client.post('/marks/delete/', { 'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True))) }) self.assertEqual(response.status_code, 200) response = self.client.post('/marks/delete/', { 'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True))) }) self.assertEqual(response.status_code, 200) # Create 5 unsafe tags created_tags = [] response = self.client.post('/marks/tags/save_tag/', { 'action': 'create', 'tag_type': 'unsafe', 'parent_id': '0', 'name': 'test:unsafe:tag:1', 'description': 'Test unsafe tag description' }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) try: created_tags.append(UnsafeTag.objects.get(tag='test:unsafe:tag:1')) except ObjectDoesNotExist: self.fail('Unsafe tag was not created') self.assertEqual(created_tags[0].description, 'Test unsafe tag description') self.assertEqual(created_tags[0].parent, None) for i in range(2, 6): self.client.post('/marks/tags/save_tag/', { 'action': 'create', 'tag_type': 'unsafe', 'parent_id': created_tags[i - 2].pk, 'name': 'test:unsafe:tag:%s' % i, 'description': '' }) created_tags.append(UnsafeTag.objects.get(tag='test:unsafe:tag:%s' % i)) self.assertEqual(created_tags[i - 1].parent, created_tags[i - 2]) # Get tag parents for editing tag 'test:unsafe:tag:3' response = self.client.post('/marks/tags/unsafe/get_tag_data/', {'tag_id': created_tags[2].pk}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, 
encoding='utf8'))) # Get tag parents for creating new tag response = self.client.post('/marks/tags/unsafe/get_tag_data/') self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Edit 5th tag response = self.client.post('/marks/tags/save_tag/', { 'action': 'edit', 'tag_type': 'unsafe', 'parent_id': created_tags[2].pk, 'name': 'test:unsafe:tag:5', 'tag_id': created_tags[4].pk, 'description': 'Test unsafe tag 5 description' }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) try: created_tags[4] = UnsafeTag.objects.get(tag='test:unsafe:tag:5') except ObjectDoesNotExist: self.fail('Tag 5 was not found after editing') self.assertEqual(created_tags[4].parent, created_tags[2]) self.assertEqual(created_tags[4].description, 'Test unsafe tag 5 description') # Remove 3d tag and check that its children (tag4 and tag5) are also removed response = self.client.post('/marks/tags/unsafe/delete/%s/' % created_tags[2].pk) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual( len(UnsafeTag.objects.filter(tag__in=['test:unsafe:tag:3', 'test:unsafe:tag:4', 'test:unsafe:tag:5'])), 0 ) del created_tags[2:] # Get tags data (for edit/create mark page). Just check that there is no error in response. 
response = self.client.post('/marks/unsafe/tags_data/', {'selected_tags': json.dumps([created_tags[1].pk])}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Download tags response = self.client.get(reverse('marks:download_tags', args=['unsafe'])) self.assertEqual(response.status_code, 200) with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='wb') as fp: for chunk in response.streaming_content: fp.write(chunk) UnsafeTag.objects.all().delete() # Upload tags with open(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile), mode='rb') as fp: response = self.client.post('/marks/tags/unsafe/upload/', {'file': fp}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) for i in range(0, len(created_tags)): try: created_tags[i] = UnsafeTag.objects.get(tag=created_tags[i].tag) except ObjectDoesNotExist: self.fail("Tags weren't uploaded") # Tags tree page response = self.client.get(reverse('marks:tags', args=['unsafe'])) self.assertEqual(response.status_code, 200) # Get report unsafe = ReportUnsafe.objects.filter(root__job_id=self.job.pk).first() self.assertIsNotNone(unsafe) # Inline mark form response = self.client.get('/marks/unsafe/%s/create/inline/' % unsafe.id) self.assertEqual(response.status_code, 200) # Create mark page response = self.client.get(reverse('marks:mark_form', args=['unsafe', unsafe.pk, 'create'])) self.assertEqual(response.status_code, 200) # Error trace compare function description try: compare_f = MarkUnsafeCompare.objects.get(name=DEFAULT_COMPARE) except ObjectDoesNotExist: self.fail("Population hasn't created compare error trace functions") response = self.client.post('/marks/get_func_description/%s/' % compare_f.pk) self.assertEqual(response.status_code, 200) 
self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Save mark compare_attrs = list({'is_compare': associate, 'attr': a_name} for a_name, associate in unsafe.attrs.values_list('attr__name__name', 'associate')) response = self.client.post(reverse('marks:mark_form', args=['unsafe', unsafe.pk, 'create']), { 'data': json.dumps({ 'compare_id': compare_f.pk, 'description': 'Mark description', 'is_modifiable': True, 'verdict': MARK_UNSAFE[1][0], 'status': MARK_STATUS[2][0], 'tags': [created_tags[0].pk], 'attrs': compare_attrs }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('cache_id', res) cache_id = res['cache_id'] # Check mark's tables try: mark = MarkUnsafe.objects.get(job=self.job, author__username='manager') except ObjectDoesNotExist: self.fail('Mark was not created') self.assertEqual(mark.type, MARK_TYPE[0][0]) self.assertEqual(mark.verdict, MARK_UNSAFE[1][0]) self.assertEqual(mark.status, MARK_STATUS[2][0]) self.assertEqual(mark.version, 1) self.assertEqual(mark.description, 'Mark description') self.assertEqual(mark.function.name, DEFAULT_COMPARE) self.assertEqual(mark.is_modifiable, True) self.assertEqual(len(mark.versions.all()), 1) mark_version = MarkUnsafeHistory.objects.get(mark=mark) self.assertEqual(mark_version.verdict, mark.verdict) self.assertEqual(mark_version.version, 1) self.assertEqual(mark_version.author.username, 'manager') self.assertEqual(mark_version.status, mark.status) self.assertEqual(mark_version.description, mark.description) for mark_attr in mark_version.attrs.all().select_related('attr__name'): self.assertIn({'is_compare': mark_attr.is_compare, 'attr': mark_attr.attr.name.name}, compare_attrs) self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, UNSAFE_VERDICTS[1][0]) 
self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=mark, report=unsafe, type=ASSOCIATION_TYPE[1][0])), 1) self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1) try: rst = ReportUnsafeTag.objects.get(report__root__job=self.job, report__parent=None, tag=created_tags[0]) # The number of unsafes for root report with specified tag equals the number of marked unsafes self.assertEqual(rst.number, len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[1][0]))) rst = ReportUnsafeTag.objects.get( report__root__job=self.job, report_id=unsafe.parent_id, tag=created_tags[0] ) # The number of unsafes for parent report (for unsafe) with specified tag # equals 1 due to only one unsafe is child for report self.assertEqual(rst.number, 1) srt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[0]) self.assertEqual(srt.number, 1) except ObjectDoesNotExist: self.fail('Reports tags cache was not filled') # Associations changes response = self.client.get('/marks/unsafe/association_changes/%s/' % cache_id) self.assertEqual(response.status_code, 200) # Edit mark page response = self.client.get(reverse('marks:mark_form', args=['unsafe', mark.pk, 'edit'])) self.assertEqual(response.status_code, 200) # Edit mark with mark_version.error_trace.file as fp: error_trace = fp.read().decode('utf8') response = self.client.post(reverse('marks:mark_form', args=['unsafe', mark.pk, 'edit']), { 'data': json.dumps({ 'compare_id': compare_f.pk, 'description': 'New mark description', 'is_modifiable': True, 'verdict': MARK_UNSAFE[2][0], 'status': MARK_STATUS[2][0], 'tags': [created_tags[1].pk], 'attrs': compare_attrs, 'comment': 'Change 1', 'error_trace': error_trace }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('cache_id', res) cache_id = res['cache_id'] # Check mark's tables 
try: mark = MarkUnsafe.objects.get(job=self.job, author__username='manager') except ObjectDoesNotExist: self.fail('Mark was not created') self.assertEqual(mark.verdict, MARK_UNSAFE[2][0]) self.assertEqual(mark.version, 2) self.assertEqual(mark.description, 'New mark description') self.assertEqual(mark.is_modifiable, True) self.assertEqual(len(mark.versions.all()), 2) mark_version = MarkUnsafeHistory.objects.filter(mark=mark).order_by('-version').first() self.assertEqual(mark_version.version, 2) self.assertEqual(mark_version.verdict, mark.verdict) self.assertEqual(mark_version.author.username, 'manager') self.assertEqual(mark_version.description, mark.description) self.assertEqual(mark_version.comment, 'Change 1') self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, SAFE_VERDICTS[2][0]) self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=mark, report=unsafe)), 1) self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[0])), 1) self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=mark_version, tag=created_tags[1])), 1) self.assertEqual(len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 2) self.assertEqual(len( ReportUnsafeTag.objects.filter(report__root__job=self.job, report__id=unsafe.parent_id) ), 2) try: urt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[0]) self.assertEqual(urt.number, 1) urt = UnsafeReportTag.objects.get(report=unsafe, tag=created_tags[1]) self.assertEqual(urt.number, 1) except ObjectDoesNotExist: self.fail('Reports tags cache was not filled') # Associations changes response = self.client.get('/marks/unsafe/association_changes/%s/' % cache_id) self.assertEqual(response.status_code, 200) # Unsafe marks list page response = self.client.get(reverse('marks:list', args=['unsafe'])) self.assertEqual(response.status_code, 200) response = self.client.get(reverse('marks:mark', args=['unsafe', mark.id])) 
self.assertEqual(response.status_code, 200) # Inline mark form response = self.client.get('/marks/unsafe/%s/edit/inline/' % mark.id) self.assertEqual(response.status_code, 200) # Confirm/unconfirm association # Mark is automatically associated after its changes self.assertEqual( MarkUnsafeReport.objects.filter(mark=mark, report=unsafe, type=ASSOCIATION_TYPE[0][0]).count(), 1 ) response = self.client.post('/marks/association/unsafe/%s/%s/unconfirm/' % (unsafe.pk, mark.pk)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(MarkUnsafeReport.objects.filter( mark=mark, report=unsafe, type=ASSOCIATION_TYPE[2][0]).count(), 1) response = self.client.post('/marks/association/unsafe/%s/%s/confirm/' % (unsafe.pk, mark.pk)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(MarkUnsafeReport.objects.filter( mark=mark, report=unsafe, type=ASSOCIATION_TYPE[1][0]).count(), 1) # Like/dislike association response = self.client.post('/marks/association/unsafe/%s/%s/like/' % (unsafe.id, mark.id)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(UnsafeAssociationLike.objects.filter( association__report=unsafe, association__mark=mark, dislike=False ).count(), 1) response = self.client.post('/marks/association/unsafe/%s/%s/dislike/' % (unsafe.id, mark.id)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(UnsafeAssociationLike.objects.filter( association__report=unsafe, 
association__mark=mark, dislike=True ).count(), 1) self.assertEqual(UnsafeAssociationLike.objects.filter( association__report=unsafe, association__mark=mark, dislike=False ).count(), 0) # Download mark response = self.client.get(reverse('marks:unsafe-download', args=[mark.pk])) self.assertEqual(response.status_code, 200) self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'}) with open(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive), mode='wb') as fp: for content in response.streaming_content: fp.write(content) # Download mark in preset format response = self.client.get(reverse('marks:unsafe-download-preset', args=[mark.pk])) self.assertEqual(response.status_code, 200) # Delete mark response = self.client.post('/marks/delete/', {'type': 'unsafe', 'ids': json.dumps([mark.id])}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertEqual(len(MarkUnsafe.objects.all()), 0) self.assertEqual(len(MarkUnsafeReport.objects.all()), 0) self.assertEqual(ReportUnsafe.objects.all().first().verdict, UNSAFE_VERDICTS[5][0]) # Upload mark with open(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive), mode='rb') as fp: response = self.client.post('/marks/upload/', {'file': fp}) fp.close() self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertIn('id', res) self.assertEqual(res.get('type'), 'unsafe') self.assertEqual(len(MarkUnsafe.objects.all()), 1) try: newmark = MarkUnsafe.objects.get(pk=res['id']) except ObjectDoesNotExist: self.fail('Mark was not uploaded') self.assertEqual(newmark.type, MARK_TYPE[2][0]) self.assertEqual(newmark.verdict, MARK_UNSAFE[2][0]) self.assertEqual(newmark.version, 2) self.assertEqual(newmark.description, 'New mark description') 
self.assertEqual(newmark.is_modifiable, True) self.assertEqual(len(newmark.versions.all()), 2) newmark_version = MarkUnsafeHistory.objects.filter(mark=newmark).order_by('-version').first() self.assertEqual(newmark_version.version, 2) self.assertEqual(newmark_version.verdict, mark.verdict) self.assertEqual(newmark_version.author.username, 'manager') self.assertEqual(newmark_version.description, mark.description) self.assertEqual(newmark_version.comment, 'Change 1') self.assertEqual(ReportUnsafe.objects.get(pk=unsafe.pk).verdict, UNSAFE_VERDICTS[2][0]) self.assertEqual(len(MarkUnsafeReport.objects.filter(mark=newmark, report=unsafe)), 1) self.assertEqual(len(MarkUnsafeReport.objects.filter(report=unsafe)), 1) self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[0])), 1) self.assertEqual(len(MarkUnsafeTag.objects.filter(mark_version=newmark_version, tag=created_tags[1])), 1) # The tag has parent which is also added to mark self.assertEqual( len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)), len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[2][0])) * 2 ) self.assertEqual(len(ReportUnsafeTag.objects.filter( report__root__job=self.job, report__id=unsafe.parent_id )), 2) # Some more mark changes for i in range(3, 6): response = self.client.post(reverse('marks:mark_form', args=['unsafe', newmark.pk, 'edit']), { 'data': json.dumps({ 'compare_id': compare_f.pk, 'description': 'New mark description', 'is_modifiable': True, 'verdict': MARK_UNSAFE[2][0], 'status': MARK_STATUS[2][0], 'tags': [created_tags[0].pk], 'attrs': compare_attrs, 'comment': 'Change %s' % i, 'error_trace': error_trace }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual( len(ReportUnsafeTag.objects.filter(report__root__job=self.job, report__parent=None)), 
len(ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[2][0])) ) self.assertEqual(len(MarkUnsafeHistory.objects.filter(mark=newmark)), 5) # Get 3d version data response = self.client.get(reverse('marks:mark_form', args=['unsafe', newmark.pk, 'edit']), params={'version': 3}) self.assertEqual(response.status_code, 200) # Compare 1st and 4th versions response = self.client.post('/marks/unsafe/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4}) self.assertEqual(response.status_code, 200) # Remove 2nd and 4th versions response = self.client.post('/marks/unsafe/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('success', res) self.assertEqual(len(MarkUnsafeHistory.objects.filter(mark=newmark)), 3) # Reports' lists pages root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None) response = self.client.get('%s?tag=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), created_tags[0].pk)) self.assertIn(response.status_code, {200, 302}) response = self.client.get('%s?tag=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), created_tags[1].pk)) self.assertIn(response.status_code, {200, 302}) response = self.client.get( '%s?verdict=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), UNSAFE_VERDICTS[0][0]) ) self.assertIn(response.status_code, {200, 302}) response = self.client.get( '%s?verdict=%s' % (reverse('reports:unsafes', args=[root_comp.pk]), UNSAFE_VERDICTS[2][0]) ) self.assertIn(response.status_code, {200, 302}) # Download all marks response = self.client.get('/marks/api/download-all/') self.assertEqual(response.status_code, 200) self.assertNotEqual(response['Content-Type'], 'application/json') with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp: for content in 
response.streaming_content: fp.write(content) # Delete all unsafe marks self.client.post('/marks/delete/', { 'type': 'unsafe', 'ids': json.dumps(list(MarkUnsafe.objects.values_list('id', flat=True))) }) self.assertEqual(MarkUnsafe.objects.count(), 0) # All verdicts must be "unsafe unmarked" self.assertEqual( ReportUnsafe.objects.filter(verdict=UNSAFE_VERDICTS[5][0]).count(), ReportUnsafe.objects.all().count() ) self.assertEqual(MarkUnsafeReport.objects.count(), 0) # Upload all marks with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp: response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['fail']), 0) self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['unsafe']), 1) def test_unknown(self): self.assertEqual(Job.objects.get(pk=self.job.pk).status, JOB_STATUS[3][0]) # Do not remove populated safe/unsafe marks as there are no problems with uploading populated marks response = self.client.post('/marks/delete/', { 'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True))) }) self.assertEqual(response.status_code, 200) # Get report unknown = None for u in ReportUnknown.objects.filter(root__job_id=self.job.pk): afc = ArchiveFileContent(u, 'problem_description', PROBLEM_DESC_FILE) if afc.content == b'KeyError: \'attr\' was not found.': unknown = u break if unknown is None: self.fail("Unknown with needed problem description was not found in test job decision") parent = ReportComponent.objects.get(pk=unknown.parent_id) # Inline mark form response = self.client.get('/marks/unknown/%s/create/inline/' % unknown.id) self.assertEqual(response.status_code, 200) # Create mark page response = 
self.client.get(reverse('marks:mark_form', args=['unknown', unknown.pk, 'create'])) self.assertEqual(response.status_code, 200) # Check regexp function response = self.client.post('/marks/check-unknown-mark/%s/' % unknown.pk, { 'function': "KeyError:\s'(\S*)'\swas\snot\sfound\.", 'pattern': 'KeyE: {0}', 'is_regex': 'true' }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Save mark response = self.client.post(reverse('marks:mark_form', args=['unknown', unknown.pk, 'create']), { 'data': json.dumps({ 'description': 'Mark description', 'is_modifiable': True, 'status': MARK_STATUS[2][0], 'function': "KeyError:\s'(\S*)'\swas\snot\sfound\.", 'problem': 'KeyE: {0}', 'link': 'http://mysite.com/', 'is_regexp': True }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('cache_id', res) cache_id = res['cache_id'] # Check mark's tables try: mark = MarkUnknown.objects.get(job=self.job, author__username='manager') except ObjectDoesNotExist: self.fail('Mark was not created') self.assertEqual(mark.type, MARK_TYPE[0][0]) self.assertEqual(mark.status, MARK_STATUS[2][0]) self.assertEqual(mark.version, 1) self.assertEqual(mark.description, 'Mark description') self.assertEqual(mark.link, 'http://mysite.com/') self.assertEqual(mark.problem_pattern, 'KeyE: {0}') self.assertEqual(mark.function, "KeyError:\s'(\S*)'\swas\snot\sfound\.") self.assertEqual(mark.is_modifiable, True) self.assertEqual(len(mark.versions.all()), 1) mark_version = MarkUnknownHistory.objects.get(mark=mark) self.assertEqual(mark_version.version, 1) self.assertEqual(mark_version.author.username, 'manager') self.assertEqual(mark_version.status, mark.status) self.assertEqual(mark_version.description, mark.description) 
self.assertEqual(mark_version.link, mark.link) self.assertEqual(mark_version.problem_pattern, mark.problem_pattern) self.assertEqual(mark_version.function, mark.function) self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1) self.assertEqual(len(MarkUnknownReport.objects.filter(mark=mark, report=unknown)), 1) # Associations changes response = self.client.get('/marks/unknown/association_changes/%s/' % cache_id) self.assertEqual(response.status_code, 200) # Edit mark page response = self.client.get(reverse('marks:mark_form', args=['unknown', mark.pk, 'edit'])) self.assertEqual(response.status_code, 200) # Edit mark response = self.client.post(reverse('marks:mark_form', args=['unknown', mark.pk, 'edit']), { 'data': json.dumps({ 'description': 'New mark description', 'is_modifiable': True, 'status': MARK_STATUS[1][0], 'function': "KeyError:\s'(\S*)'.*", 'problem': 'KeyE: {0}', 'link': 'http://mysite.com/', 'is_regexp': True, 'comment': 'Change 1' }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('cache_id', res) cache_id = res['cache_id'] # Check mark's tables try: mark = MarkUnknown.objects.get(job=self.job, author__username='manager') except ObjectDoesNotExist: self.fail('Mark was not created') self.assertEqual(mark.version, 2) self.assertEqual(mark.description, 'New mark description') self.assertEqual(mark.is_modifiable, True) self.assertEqual(len(mark.versions.all()), 2) mark_version = MarkUnknownHistory.objects.filter(mark=mark).order_by('-version').first() self.assertEqual(mark_version.version, 2) self.assertEqual(mark_version.author.username, 'manager') self.assertEqual(mark_version.description, mark.description) self.assertEqual(mark_version.comment, 'Change 1') self.assertEqual(mark_version.link, mark.link) self.assertEqual(mark_version.problem_pattern, mark.problem_pattern) 
self.assertEqual(mark_version.function, mark.function) self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1) self.assertEqual(len(MarkUnknownReport.objects.filter(mark=mark, report=unknown)), 1) # Associations changes response = self.client.get('/marks/unknown/association_changes/%s/' % cache_id) self.assertEqual(response.status_code, 200) # Unknown marks list page response = self.client.get(reverse('marks:list', args=['unknown'])) self.assertEqual(response.status_code, 200) response = self.client.get(reverse('marks:mark', args=['unknown', mark.id])) self.assertEqual(response.status_code, 200) # Inline mark eddit form response = self.client.get('/marks/unknown/%s/edit/inline/' % mark.id) self.assertEqual(response.status_code, 200) # Confirm/unconfirm association # Mark is automatically associated after its changes self.assertEqual( MarkUnknownReport.objects.filter(mark=mark, report=unknown, type=ASSOCIATION_TYPE[0][0]).count(), 1 ) response = self.client.post('/marks/association/unknown/%s/%s/unconfirm/' % (unknown.pk, mark.pk)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(MarkUnknownReport.objects.filter( mark=mark, report=unknown, type=ASSOCIATION_TYPE[2][0]).count(), 1) response = self.client.post('/marks/association/unknown/%s/%s/confirm/' % (unknown.pk, mark.pk)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(MarkUnknownReport.objects.filter( mark=mark, report=unknown, type=ASSOCIATION_TYPE[1][0]).count(), 1) # Like/dislike association response = self.client.post('/marks/association/unknown/%s/%s/like/' % (unknown.id, mark.id)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') 
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(UnknownAssociationLike.objects.filter( association__report=unknown, association__mark=mark, dislike=False ).count(), 1) response = self.client.post('/marks/association/unknown/%s/%s/dislike/' % (unknown.id, mark.id)) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(UnknownAssociationLike.objects.filter( association__report=unknown, association__mark=mark, dislike=True ).count(), 1) self.assertEqual(UnknownAssociationLike.objects.filter( association__report=unknown, association__mark=mark, dislike=False ).count(), 0) # Download mark response = self.client.get(reverse('marks:unknown-download', args=[mark.pk])) self.assertEqual(response.status_code, 200) self.assertIn(response['Content-Type'], {'application/x-zip-compressed', 'application/zip'}) with open(os.path.join(settings.MEDIA_ROOT, self.unknown_archive), mode='wb') as fp: for content in response.streaming_content: fp.write(content) # Download mark in preset format response = self.client.get(reverse('marks:unknown-download-preset', args=[mark.pk])) self.assertEqual(response.status_code, 200) # Delete mark response = self.client.post('/marks/delete/', {'type': 'unknown', 'ids': json.dumps([mark.id])}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertEqual(len(MarkUnknown.objects.all()), 0) self.assertEqual(len(MarkUnknownReport.objects.all()), 0) # Upload mark with open(os.path.join(settings.MEDIA_ROOT, self.unknown_archive), mode='rb') as fp: response = self.client.post('/marks/upload/', {'file': fp}) fp.close() self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 
'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertIn('id', res) self.assertEqual(res.get('type'), 'unknown') try: newmark = MarkUnknown.objects.get(pk=res['id']) except ObjectDoesNotExist: self.fail('Mark was not uploaded') self.assertEqual(newmark.version, 2) self.assertEqual(newmark.description, 'New mark description') self.assertEqual(newmark.is_modifiable, True) self.assertEqual(len(newmark.versions.all()), 2) newmark_version = MarkUnknownHistory.objects.filter(mark=newmark).order_by('-version').first() self.assertEqual(newmark_version.version, 2) self.assertEqual(newmark_version.author.username, 'manager') self.assertEqual(newmark_version.comment, 'Change 1') self.assertEqual(len(MarkUnknownReport.objects.filter(mark=newmark, report=unknown)), 1) self.assertEqual(len(MarkUnknownReport.objects.filter(report=unknown)), 1) self.assertEqual(len(UnknownProblem.objects.filter(name='KeyE: attr')), 1) # Check non-regexp function response = self.client.post('/marks/check-unknown-mark/%s/' % unknown.pk, { 'function': "KeyError: 'attr' was not found.", 'pattern': 'KeyE: attr', 'is_regex': 'false' }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Non-regexp function change response = self.client.post(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']), { 'data': json.dumps({ 'description': 'New mark description', 'is_modifiable': True, 'status': MARK_STATUS[2][0], 'function': "KeyError: 'attr' was not found.", 'problem': 'KeyE: attr', 'link': 'http://mysite.com/', 'is_regexp': False, 'comment': 'Change 3' }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) # Some more mark changes for i in range(4, 6): response = 
self.client.post(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']), { 'data': json.dumps({ 'description': 'No regexp', 'is_modifiable': True, 'status': MARK_STATUS[2][0], 'function': "KeyError:.*'(\S*)'", 'problem': 'KeyE: {0}', 'link': 'http://mysite.com/', 'is_regexp': True, 'comment': 'Change %s' % i }) }) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(len(MarkUnknownHistory.objects.filter(mark=newmark)), 5) # Get 3d version data response = self.client.get(reverse('marks:mark_form', args=['unknown', newmark.pk, 'edit']), params={'version': 3}) self.assertEqual(response.status_code, 200) # Compare 1st and 4th versions response = self.client.post('/marks/unknown/%s/compare_versions/' % newmark.pk, {'v1': 1, 'v2': 4}) self.assertEqual(response.status_code, 200) # Remove 2nd and 4th versions response = self.client.post('/marks/unknown/%s/remove_versions/' % newmark.pk, {'versions': json.dumps([2, 4])}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') res = json.loads(str(response.content, encoding='utf8')) self.assertNotIn('error', res) self.assertIn('success', res) self.assertEqual(len(MarkUnknownHistory.objects.filter(mark=newmark)), 3) # Reports' lists pages root_comp = ReportComponent.objects.get(root__job_id=self.job.pk, parent=None) response = self.client.get( '%s?component=%s' % (reverse('reports:unknowns', args=[root_comp.pk]), parent.component_id) ) self.assertIn(response.status_code, {200, 302}) try: problem_id = UnknownProblem.objects.get(name='KeyE: attr').pk except ObjectDoesNotExist: self.fail("Can't find unknown problem") response = self.client.get('%s?component=%s&problem=%s' % ( reverse('reports:unknowns', args=[root_comp.pk]), parent.component_id, problem_id )) self.assertIn(response.status_code, {200, 302}) # Download all 
marks response = self.client.get('/marks/api/download-all/') self.assertEqual(response.status_code, 200) self.assertNotEqual(response['Content-Type'], 'application/json') with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='wb') as fp: for content in response.streaming_content: fp.write(content) # Delete all marks self.client.post('/marks/delete/', { 'type': 'unknown', 'ids': json.dumps(list(MarkUnknown.objects.values_list('id', flat=True))) }) self.assertEqual(MarkUnknown.objects.count(), 0) # All verdicts must be "unknown unmarked" self.assertEqual(MarkUnknownReport.objects.all().count(), 0) # Upload all marks with open(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch), mode='rb') as fp: response = self.client.post('/marks/upload-all/', {'delete': 1, 'file': fp}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') self.assertNotIn('error', json.loads(str(response.content, encoding='utf8'))) self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['fail']), 0) self.assertEqual(int(json.loads(str(response.content, encoding='utf8'))['unknown']), 1) def tearDown(self): if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.safe_archive)): os.remove(os.path.join(settings.MEDIA_ROOT, self.safe_archive)) if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive)): os.remove(os.path.join(settings.MEDIA_ROOT, self.unsafe_archive)) if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.unknown_archive)): os.remove(os.path.join(settings.MEDIA_ROOT, self.unknown_archive)) if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile)): os.remove(os.path.join(settings.MEDIA_ROOT, self.test_tagsfile)) if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch)): os.remove(os.path.join(settings.MEDIA_ROOT, self.all_marks_arch)) super(TestMarks, self).tearDown()
Getty Images (Updated) Why is the U.S. government cracking down on medical marijuana, a $1.7 billion business — and one of the few that seems to be thriving in a moribund economy? In early October, the Justice Department announced that it would be targeting medical-marijuana dispensaries in California. Calling large dispensaries “profiteers” that “hijacked” the state’s medical-marijuana law, “motivated not by compassion but by money,” California’s four U.S. Attorneys announced the arrests of two major dispensary owners and a lawyer they accused of making millions from growing the drug. It was a reversal of President Obama’s campaign promise to end the previous Administration’s legal pursuit of medical marijuana. Although Obama’s Justice Department had previously abided by a memo that said prosecuting marijuana providers and patients who followed state law was not an “efficient use of federal resources,” over the summer, the Administration changed tack, expressing concern about “an increase in the scope of commercial cultivation, sale, distribution and use of marijuana for purported medical purposes.” It began sending letters to dispensaries and their landlords threatening forfeiture of the properties if marijuana sales did not stop. The IRS has also begun a crackdown on California dispensaries. It claims that the dispensaries owe back taxes because their business deductions were illegal. In addition, the Bureau of Alcohol, Tobacco and Firearms recently warned gun dealers to not sell to users of medical marijuana. (MORE: More Evidence That Marijuana-like Drugs May Help Prevent PTSD) Ironically, national support for medical marijuana is at a high, at about 70%, and more and more advocates are calling for total legalization of the drug. For the first time ever, Gallup found last week that more Americans support making marijuana use legal: 50% of Americans support legalization, with 46% opposed. That’s up from just 12% in favor in 1969. 
Support for legalization is even higher in younger age groups: 62% of those ages 18 to 29 want legal marijuana, while just 31% of those over 65 favor changing the current law. As boomers and even-more-weed-friendly Gen Xers age, pro-legalization sentiment continues to grow. Add to that support for legalization from the California Medical Association, the state’s largest group representing doctors, with some 35,000 members. In this context, medical marijuana doesn’t seem like a crime voters are clamoring to prosecute. The federal crackdown isn’t likely to affect marijuana consumption, either. Studies repeatedly find little effect of law-enforcement spending on demand for drugs. Indeed, a recent marijuana price analysis by a collective of geographers called The Floating Sheep (you can’t make this stuff up!) — based on crowd-sourced data on the street value of marijuana by quantity, quality and location — found no correlation between the local cost of marijuana and the number of arrests for dealing or possession in the state. (MORE: Medical-Marijuana Sales Grow to Rival Viagra’s: New Report) Rather, price is correlated with location. As the Atlantic‘s Richard Florida describes it: Their main finding is that marijuana prices rise the further a location is from the major center of production. Decreased supply leads to a rise in transportation costs and risk. Clearly pot prices are as low as they are in the Pacific Northwest and Florida for the same reasons that potatoes are cheap in Idaho and corn is cheap in Iowa — because they’re close to the source, the places where the product is either grown, imported, processed, or all three. (Incidentally, the nationwide average for an ounce of high-quality smoke is $377.02.) It seems unlikely that spending scarce federal dollars during a recession on a medical-marijuana crackdown is going to win any awards for “efficient” use of government resources from either the right or the left. 
In fact, I seem to recall that there’s a Senate committee desperately seeking quick budget cuts. In view of these facts, do you think they should slash schools, meals for seniors, health care spending, cancer research, unemployment benefits, firefighter or police salaries — or the war on medical marijuana? MORE: U.S. Rules That Marijuana Has No Medical Use. What Does Science Say? Correction [Oct. 25]: The original version of this post misstated that the size of the medical-marijuana market was $1.7 billion in California; that estimate is for the entire U.S. Maia Szalavitz is a health writer at TIME.com. Find her on Twitter at @maiasz. You can also continue the discussion on TIME Healthland’s Facebook page and on Twitter at @TIMEHealthland.
All human beings must fulfill their basic, physiological needs in order to sustain a healthy and productive life. If you just look at Maslow’s Hierarchy of Needs you will find that human beings first require the fulfillment of physiological needs and then safety needs. Physiological is pretty obvious, and safety of course means the “security of body, employment, resources, health, etc.” See Maslow’s Hierarchy here: http://en.wikipedia.org/... Now in order to fulfill these needs that Maslow describes, what is necessary? Well, in current society, the answer is a steady income, through which men can procure these needs. A disparity between income and need leads to poverty, starvation, and homelessness. Only a living wage — i.e., a wage sufficient to meet the basic needs of men — can alleviate poverty. That is why OWS is calling for Occupy Midsummer (continue reading):
Role changes in software development projects using agile methodologies: A case study of private companies in Guatemala City This new era of software development has begun to generate changes in the way software development teams are organized; new roles and functions are established in project management. In many cases an equivalent role does not exist, so a similar existing role must be chosen and the project manager must adapt. This research seeks to identify the changes made by software project managers in private companies in Guatemala City. The research hypothesizes that the implementation of agile methodologies produces an update of the functions and roles in the area of project management in private institutions. For this purpose, a four-phase study was carried out: design of the instrument, distribution of the instrument, processing of the information, and analysis of the information. The study worked with a sample of 35 private companies, with the participation of software development team leaders. The results obtained from an online survey allowed us to discover that there are considerable changes in the roles and responsibilities of project leaders in organizations that migrate from a traditional methodology to an agile methodology.
<reponame>JieyangChen7/MGARD /* * Copyright 2022, Oak Ridge National Laboratory. * MGARD-X: MultiGrid Adaptive Reduction of Data Portable across GPUs and CPUs * Author: <NAME> (<EMAIL>) * Date: March 17, 2022 */ #include <chrono> #include <iomanip> #include <iostream> #include <numeric> #include <vector> #include "compress_x.hpp" #include "mgard-x/CompressionHighLevel/CompressionHighLevel.h" #include "mgard-x/CompressionHighLevel/Metadata.hpp" #include "mgard-x/Hierarchy/Hierarchy.h" #include "mgard-x/RuntimeX/RuntimeXPublic.h" namespace mgard_x { enum device_type auto_detect_device() { enum device_type dev_type = device_type::NONE; #if MGARD_ENABLE_SERIAL dev_type = device_type::SERIAL; #endif #if MGARD_ENABLE_CUDA if (deviceAvailable<CUDA>()) { dev_type = device_type::CUDA; } #endif #if MGARD_ENABLE_HIP if (deviceAvailable<HIP>()) { dev_type = device_type::HIP; } #endif #if MGARD_ENABLE_SYCL if (deviceAvailable<SYCL>()) { dev_type = device_type::SYCL; } #endif if (dev_type == device_type::NONE) { std::cout << log::log_err << "MGARD-X was not built with any backend.\n"; exit(-1); } return dev_type; } void compress(DIM D, data_type dtype, std::vector<SIZE> shape, double tol, double s, enum error_bound_type mode, const void *original_data, void *&compressed_data, size_t &compressed_size, Config config, bool output_pre_allocated) { enum device_type dev_type = config.dev_type; if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); } if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL compress<SERIAL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA compress<CUDA>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was 
not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP compress<HIP>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL compress<SYCL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void compress(DIM D, data_type dtype, std::vector<SIZE> shape, double tol, double s, enum error_bound_type mode, const void *original_data, void *&compressed_data, size_t &compressed_size, bool output_pre_allocated) { enum device_type dev_type = auto_detect_device(); if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL compress<SERIAL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA compress<CUDA>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP compress<HIP>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL compress<SYCL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, output_pre_allocated); #else 
std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void compress(DIM D, data_type dtype, std::vector<SIZE> shape, double tol, double s, enum error_bound_type mode, const void *original_data, void *&compressed_data, size_t &compressed_size, std::vector<const Byte *> coords, Config config, bool output_pre_allocated) { enum device_type dev_type = config.dev_type; if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); } if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL compress<SERIAL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA compress<CUDA>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP compress<HIP>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL compress<SYCL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void compress(DIM D, data_type dtype, std::vector<SIZE> shape, double tol, double s, enum error_bound_type mode, const void *original_data, void *&compressed_data, size_t &compressed_size, 
std::vector<const Byte *> coords, bool output_pre_allocated) { enum device_type dev_type = auto_detect_device(); if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL compress<SERIAL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA compress<CUDA>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP compress<HIP>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL compress<SYCL>(D, dtype, shape, tol, s, mode, original_data, compressed_data, compressed_size, coords, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void decompress(const void *compressed_data, size_t compressed_size, void *&decompressed_data, Config config, bool output_pre_allocated) { enum device_type dev_type = config.dev_type; if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); } if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL decompress<SERIAL>(compressed_data, compressed_size, decompressed_data, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA decompress<CUDA>(compressed_data, compressed_size, decompressed_data, 
config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP decompress<HIP>(compressed_data, compressed_size, decompressed_data, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL decompress<SYCL>(compressed_data, compressed_size, decompressed_data, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void decompress(const void *compressed_data, size_t compressed_size, void *&decompressed_data, bool output_pre_allocated) { enum device_type dev_type = auto_detect_device(); if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL decompress<SERIAL>(compressed_data, compressed_size, decompressed_data, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA decompress<CUDA>(compressed_data, compressed_size, decompressed_data, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP decompress<HIP>(compressed_data, compressed_size, decompressed_data, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL decompress<SYCL>(compressed_data, compressed_size, decompressed_data, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void 
decompress(const void *compressed_data, size_t compressed_size, void *&decompressed_data, std::vector<mgard_x::SIZE> &shape, data_type &dtype, Config config, bool output_pre_allocated) { enum device_type dev_type = config.dev_type; if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); } if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL decompress<SERIAL>(compressed_data, compressed_size, decompressed_data, dtype, shape, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA decompress<CUDA>(compressed_data, compressed_size, decompressed_data, dtype, shape, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP decompress<HIP>(compressed_data, compressed_size, decompressed_data, dtype, shape, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL decompress<SYCL>(compressed_data, compressed_size, decompressed_data, dtype, shape, config, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void decompress(const void *compressed_data, size_t compressed_size, void *&decompressed_data, std::vector<mgard_x::SIZE> &shape, data_type &dtype, bool output_pre_allocated) { enum device_type dev_type = auto_detect_device(); if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL decompress<SERIAL>(compressed_data, compressed_size, decompressed_data, dtype, shape, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if 
(dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA decompress<CUDA>(compressed_data, compressed_size, decompressed_data, dtype, shape, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP decompress<HIP>(compressed_data, compressed_size, decompressed_data, dtype, shape, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL decompress<SYCL>(compressed_data, compressed_size, decompressed_data, dtype, shape, output_pre_allocated); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void BeginAutoTuning(enum device_type dev_type) { if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); } if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL mgard_x::BeginAutoTuning<mgard_x::SERIAL>(); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA mgard_x::BeginAutoTuning<mgard_x::CUDA>(); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP mgard_x::BeginAutoTuning<mgard_x::HIP>(); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL mgard_x::BeginAutoTuning<mgard_x::SYCL>(); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } void EndAutoTuning(enum device_type dev_type) { if (dev_type == device_type::AUTO) { dev_type = auto_detect_device(); 
} if (dev_type == device_type::SERIAL) { #if MGARD_ENABLE_SERIAL mgard_x::EndAutoTuning<mgard_x::SERIAL>(); #else std::cout << log::log_err << "MGARD-X was not built with SERIAL backend.\n"; exit(-1); #endif } else if (dev_type == device_type::CUDA) { #if MGARD_ENABLE_CUDA mgard_x::EndAutoTuning<mgard_x::CUDA>(); #else std::cout << log::log_err << "MGARD-X was not built with CUDA backend.\n"; exit(-1); #endif } else if (dev_type == device_type::HIP) { #if MGARD_ENABLE_HIP mgard_x::EndAutoTuning<mgard_x::HIP>(); #else std::cout << log::log_err << "MGARD-X was not built with HIP backend.\n"; exit(-1); #endif } else if (dev_type == device_type::SYCL) { #if MGARD_ENABLE_SYCL mgard_x::EndAutoTuning<mgard_x::SYCL>(); #else std::cout << log::log_err << "MGARD-X was not built with SYCL backend.\n"; exit(-1); #endif } else { std::cout << log::log_err << "Unsupported backend.\n"; } } } // namespace mgard_x
package com.noobit.web.filter;

import org.springframework.stereotype.Component;

import javax.servlet.*;
import java.io.IOException;

/**
 * Servlet filter that prints how long the rest of the filter chain takes to
 * process each request, in milliseconds.
 *
 * <p>NOTE(review): the {@code @Component} registration is commented out, so
 * this filter is presumably registered elsewhere (e.g. via a
 * FilterRegistrationBean) — confirm before relying on it being active.
 */
//@Component
public class TimerFilter implements Filter {

    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        System.out.println("time filter init");
    }

    /**
     * Times the downstream chain: records the wall clock before and after
     * delegating and prints the elapsed milliseconds.
     */
    @Override
    public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse,
                         FilterChain filterChain) throws IOException, ServletException {
        System.out.println("time filter start");
        // System.currentTimeMillis() reads the clock directly instead of
        // allocating a java.util.Date per request just to call getTime().
        long start = System.currentTimeMillis();
        filterChain.doFilter(servletRequest, servletResponse);
        long end = System.currentTimeMillis();
        System.out.println("time filter: " + (end - start));
        System.out.println("time filter finish");
    }

    @Override
    public void destroy() {
        System.out.println("time filter destroy");
    }
}
Impact of using multiple-satellite sensors on the accuracy of daily-mean sea surface wind data ABSTRACT Most satellites observing sea surface winds have sun-synchronous orbits and provide observation data at the same place twice per day. A daily mean value estimated from these data suffers from sampling errors because the high-frequency variation, including the diurnal change in the wind field, cannot be neglected. To overcome this problem, the use of multiple satellites is useful. The purposes of this study were to describe the time variation of the accuracy of daily-mean wind data in the third-generation Japanese Ocean Flux Data Sets with Use of Remote Sensing Observations (J-OFURO3) and to investigate its causes by comparison with in situ measurement data from moored buoys. The results reveal that the three statistical measures — bias, root mean square (RMS) error, and cross-correlation coefficient — have improved over time. A set of scatter diagrams of the number of satellites versus the statistical measures for each year shows strong correlations. The accuracy of daily-mean data provided by J-OFURO3 is concluded to depend on the number of satellites. We also focused on specific time intervals for satellite wind observations, particularly the maximum missing time interval (MMTI) within a day. The results showed that the correlations between the three statistical measures and MMTI were quite high. Because the two above-mentioned causes are not independent, we analysed their combined effect. The results show that the accuracy of daily-mean data depends more strongly on MMTI than on the number of observations. By applying these results, it is possible to determine the optimal number of sensors and the optimal observation times needed to achieve a target accuracy. This provides very useful information for the design of satellite observation systems for sea surface wind.
import { Component , OnInit } from '@angular/core';
import {MsgService} from "../../../service";
import {DateUtils} from '@shared/utils';
import { QueryModel } from './query.model'
import {RESPONSE} from '../../../models';
import {GoodsClassifyService} from '../../../service/goods';
import {FormBuilder, FormGroup, Validators} from '@angular/forms';
import {Service} from '../../../../decorators/service.decorator';

/**
 * Goods-classification management page: renders the classification list in a
 * table and drives add / edit / delete through a single shared reactive form.
 */
@Component({
    selector: 'goods-classify' ,
    templateUrl: './classify.component.html',
    styleUrls: ['./classify.component.less']
})
export class GoodsClassifyComponent implements OnInit{
    constructor(
        private readonly msg: MsgService,
        private readonly service: GoodsClassifyService,
        private readonly fb: FormBuilder
    ){}

    // Load the first page as soon as the component initializes.
    ngOnInit(): void {
        this.getList() ;
    }

    // Paging/filter parameters sent with every list request.
    private queryModel: QueryModel = new QueryModel ;
    // Whether the delete-confirmation modal is shown.
    public isVisible: boolean = false ;
    // Whether the add/edit form box is shown.
    public infoBoxShow: boolean = false ;
    // true while the form box edits an existing record; false when creating.
    public editMark: boolean = false ;

    // Table configuration consumed by the template: columns, row actions and
    // the pagination callback.
    public tableData = {
        loading: true,
        page: 1,
        total: 0,
        columns: [
            { title: '名称', type: 'text', reflect: 'name' },
            { title: '备注', type: 'text', reflect: 'remark' },
            // Shop name comes from the nested shopInfo object.
            { title: '店铺', type: 'text', filter: val => val.shopInfo.name },
            // Creation timestamp rendered as y-m-d.
            { title: "创建时间" , type: 'text' , filter: ( val ) => {
                return DateUtils.format( val.createTime , 'y-m-d') ;
            }},
        ],
        data: [],
        btn: {
            title: '操作',
            items: [{
                // Edit: preload the row into the shared form, open in edit mode.
                type: 'edit', title: '编辑', fn: (data) => {
                    this.form.patchValue( data ) ;
                    this.editMark = true ;
                    this.infoBoxShow = true ;
                }},{
                // Delete: stash the row in the form and ask for confirmation.
                type: 'del', title: '删除', fn: (data) => {
                    this.isVisible = true;
                    this.form.patchValue(data);
                }}
            ],
        },
        // Pagination callback: 'size' changes the page size, 'page' the page
        // index; either way the list is re-fetched.
        change: (type: string, size: number) => {
            if ( type === 'size' ) this.queryModel.pageSize = size;
            if ( type === 'page' ) {
                this.tableData.page = size;
                this.queryModel.currentPage = size;
            }
            this.getList()
        },
    };

    // Fetches the current page of classifications and updates table state.
    public getList(): void{
        this.tableData.loading = true ;
        this.service.get( this.queryModel )
            .subscribe((res: RESPONSE) => {
                this.tableData.data = res.data ;
                this.tableData.loading = false;
                if(res.page) this.tableData.total = res.page.totalNumber ;
            },err => {
                this.tableData.loading = false ;
                this.msg.error( err ) ;
            });
    };

    // Opens the form box empty, in "create" mode.
    public add(): void{
        this.form.reset() ;
        this.infoBoxShow = !this.infoBoxShow ;
        this.editMark = false ;
    };

    // Shared form for create/edit/delete; only `name` is required.
    public form: FormGroup = this.fb.group({
        name : [ null , [ Validators.required ]],
        remark : [ null ] ,
        id : [ null ]
    });

    // NOTE(review): the @Service decorator appears to invoke the named service
    // method ('service.delete' etc.) with the value returned by the supplied
    // thunk, running the decorated body on success — confirm in
    // service.decorator before changing any of these handlers.
    @Service('service.delete', true, function(){
        return (this as GoodsClassifyComponent).form.value ;
    })
    modalConfirm($event: Event) {
        this.msg.success('删除成功');
        this.isVisible = false;
        this.getList() ;
    };

    @Service("service.post" , true , function(){
        return (this as GoodsClassifyComponent).form.value ;
    })
    makeNew( $event : MouseEvent ): void{
        this.msg.success("添加成功") ;
        this.infoBoxShow = false ;
        this.getList();
    };

    @Service("service.put" , true , function(){
        return (this as GoodsClassifyComponent).form.value ;
    })
    save( $event : MouseEvent ): void{
        this.msg.success("修改成功");
        this.infoBoxShow = false ;
        this.getList() ;
    };
}
package herramientas;

import java.util.ArrayList;

import sintactico.Primeros;

/**
 * Utility routines for loading a grammar (through {@link Primeros}) into a
 * {@link PSTabla} and for extracting its terminal / non-terminal symbols.
 *
 * NOTE: this class depends on the grammar reader used by {@link Primeros};
 * it re-parses the expression on every call, so it is not optimal.
 */
public class Gramatica {

	/**
	 * Reads the grammar contained in {@code expresion} into a table.
	 *
	 * @param expresion textual grammar definition understood by {@link Primeros}
	 * @param offset    number of empty leading tuples to prepend (aligns
	 *                  production indices with external numbering)
	 * @return table with one tuple per production; each tuple carries the
	 *         left-hand non-terminal plus every right-hand symbol (including "Eps")
	 */
	public static PSTabla leeGramatica (String expresion, int offset) {
		return construye(expresion, offset, true);
	}

	/**
	 * Same as {@link #leeGramatica(String, int)} but omits every "Eps"
	 * (epsilon) symbol from the right-hand sides.
	 */
	public static PSTabla leeGramaticaNoEps (String expresion, int offset) {
		return construye(expresion, offset, false);
	}

	/** Convenience overload of {@link #leeGramatica(String, int)} with no offset. */
	public static PSTabla leeGramatica (String expresion) {
		return leeGramatica (expresion, 0);
	}

	/**
	 * Shared implementation for the two public readers (they previously
	 * duplicated this loop verbatim).
	 *
	 * @param incluyeEps whether "Eps" symbols are kept in the productions
	 */
	private static PSTabla construye (String expresion, int offset, boolean incluyeEps) {
		PSTabla gramatica = new PSTabla ("Gramatica");
		Primeros prim = new Primeros (expresion, false);

		// Optional padding so production i lands at table index i + offset.
		for (int i = 0 ; i < offset ; i++) {
			gramatica.add(new PSTupla ());
		}

		for (int i = 0 ; i < prim.getGramatica().size() ; i++) {
			PSTupla tupla = new PSTupla ();
			for (int j = 0 ; j < prim.getGramatica().get(i).size() ; j++) {
				String letra = prim.getGramatica().get(i).get(j).letra;
				if (j == 0) {
					// The first symbol of a production is its left-hand non-terminal.
					tupla.noTerminal = letra;
				} else if (incluyeEps || !letra.equals("Eps")) {
					tupla.add(letra);
				}
			}
			gramatica.add(tupla);
		}
		return gramatica;
	}

	/** @return every distinct left-hand non-terminal of {@code g}, in first-seen order. */
	public static ArrayList <String> retornaNoTerminales (PSTabla g) {
		ArrayList <String> vec = new ArrayList <String> ();
		for (int i = 0 ; i < g.size() ; i++) {
			if (!vec.contains(g.get(i).noTerminal))
				vec.add(g.get(i).noTerminal);
		}
		return vec;
	}

	/** @return every distinct right-hand symbol of {@code g} that never appears as a left-hand side. */
	public static ArrayList <String> retornaTerminales (PSTabla g) {
		ArrayList <String> terminales = new ArrayList <String> ();
		ArrayList <String> noTerminales = retornaNoTerminales(g);
		for (int i = 0 ; i < g.size() ; i++) {
			for (int j = 0 ; j < g.get(i).getConjunto().size() ; j++) {
				String caracter = g.get(i).getConjunto().get(j);
				// A symbol is terminal when it is not a non-terminal and not yet recorded.
				if (!noTerminales.contains(caracter) && !terminales.contains(caracter)) {
					terminales.add(caracter);
				}
			}
		}
		return terminales;
	}

	/** @return whether {@code str} is one of the known non-terminals. */
	public static boolean existeNoTerminal (ArrayList <String> noTerminales, String str) {
		return noTerminales.contains(str);
	}
}
A TWO-SEX POLYGENIC MODEL FOR THE EVOLUTION OF PREMATING ISOLATION. II. COMPUTER SIMULATION OF EXPERIMENTAL SELECTION PROCEDURES A Monte-Carlo simulation program is described for a polygenic mating model introduced in the first paper in this series (SVED 1981). The program is used to simulate the situation in laboratory experiments in which two strains are allowed to mass-mate, hybrids are artificially eliminated and the establishment of mating isolation is studied. It is shown that, if mating choice is sufficiently precise, a combination of chance fluctuation and selection can lead to divergence in mating behavior. However, for small population sizes, the variability would usually be considerably reduced by the time some divergence is established, leading to low eventual levels of isolation. For larger population sizes, on the other hand, it may take many generations for any divergence to be established.—A dissection of the selective forces involved in the divergence shows that the major force potentially responsible for initial selective response is the tendency for divergent females and males to reject mates from the wrong strain. However, this is nullified in mixed-strain matings by the tendency of such individuals equally to reject mates from the correct strain. To overcome this problem, it is suggested that the usual mixed strain mating procedure be replaced by procedures specifically designed to select for rejection of interstrain matings. Two procedures are suggested for this, and computer simulation shows that one or other of the procedures will work under the assumptions of the mating model. Other possible outcomes of selection, including asymmetrical divergence, are discussed for cases in which the assumptions of the mating model are invalid. THE first paper in the series (SVED 1981) introduced a model in which mating is controlled by two different sets of genes that determine male and female mating behavior, respectively.
The model was applied to the situation in which two strains of an organism have diverged to produce sterile hybrids, and it was shown that premating isolation would evolve only if there were some initial divergence of mating behavior between the two strains. Such initial divergence could arise by chance in a small population, although no analysis of this possibility was attempted. 1 Permanent address. Genetics 97: 217-235 January, 1981.
<gh_stars>1-10
#include <iostream>

#include <catch2/catch.hpp>

#include "../../../util/Time.h"
#include "../MongoHeader.h"

using namespace rdm;

// Micro-benchmark of mongocxx write paths against a live MongoDB instance
// at "mongodb://ubuntu.a.com:27017": times 10,000 insert_one round-trips
// versus a single insert_many of 100,000 documents and prints the elapsed
// milliseconds for each.
// NOTE(review): this requires network access to that host — it is a timing
// probe rather than an assertion-based test.
TEST_CASE("MongoTest", "[]") {
    // Driver-wide initialization; must outlive every client object.
    mongocxx::instance inst{};
    mongocxx::client conn{mongocxx::uri{"mongodb://ubuntu.a.com:27017"}};

    bsoncxx::builder::stream::document document{};

    auto collection = conn["testdb"]["testcollection"];
    document << "hello" << "world";

    // Benchmark 1: 10,000 individual inserts of the same small document.
    StopWatch sw;
    sw.start();
    for (int i = 0; i < 10 * 1000; ++i) {
        collection.insert_one(document.view());
    }
    sw.end();
    std::cout << "insert_one 1w, " << sw.durationMs() << " ms" << std::endl;
    sw.reset();

    // Benchmark 2: one bulk call; the measured time also includes building
    // the 100,000 documents in memory.
    sw.start();
    std::vector<bsoncxx::document::value> documents;
    documents.reserve(100 * 1000);
    for (int i = 0; i < 100 * 1000; ++i) {
        documents.push_back(
            bsoncxx::builder::stream::document{} << "i" << i << "j" << i
                                                 << bsoncxx::builder::stream::finalize);
    }
    collection.insert_many(documents);
    sw.end();
    std::cout << "insert_many 10w, " << sw.durationMs() << " ms" << std::endl;
    sw.reset();

    // Read-back check kept for reference; disabled, presumably because the
    // JSON length is environment-dependent — TODO confirm before re-enabling.
    // auto cursor = collection.find({});
    //
    // for (auto&& doc : cursor) {
    //     // { "_id" : { "$oid" : "5ed154df6644f10493208512" }, "hello" : "world" }
    //     // std::cout << bsoncxx::to_json(doc) << std::endl;
    //     REQUIRE(bsoncxx::to_json(doc).size() == 70);
    // }
}
The Media and the Financial Markets: A Review An increasing stream of literature about the role of the media in finance has been developed in recent years. The textual analysis techniques and the increased availability of data have created the possibility of going deeper into an analysis of soft information, including newspaper articles and other media-generated content. This paper aims at putting forward knowledge about the media's role in finance, leveraging on a large variety of research from the field of finance, accounting, management, and economics. The goal and the contribution of this paper are to provide a response to these queries by leveraging on existing, but often unconnected, literature.
As a conventional technique, a capacitance type touch panel that has a built-in pressure sensor is known (Patent Literature 1). The sensor disclosed in Patent Literature 1 includes (i) an electrode pattern that is made of indium tin oxide (ITO), (ii) an electrostatic capacitance measuring device that is configured to connect with the electrode pattern via a contact point and to measure electrostatic capacitance between the electrode pattern and an ambient environment of the electrode pattern, and (iii) a resistance measuring device that is configured to measure resistance between two points in the electrode pattern. In a case where an electric conductor such as a finger has approached the electrode pattern, the electrostatic capacitance measuring device detects a location of the electric conductor based on change in electrostatic capacitance. Then, the resistance measuring device detects pressure that is applied by the electric conductor based on change in resistance of the electrode pattern which change is caused by pressure applied to the electrode pattern by the electric conductor.
Rooting Evaluation Guidelines in Relational Ethics: Lessons From Africa As global discussions of evaluation standards become more contextually nuanced, culturally responsive conceptions of ethics have not been sufficiently discussed. In academic social research, ethical clearance processes have been designed to protect vulnerable people from harm related to participation in a research project. This article expands the ambit of ethical protection thinking and proposes a relational ethics approach for evaluation practitioners. This centers an analysis of power relations among and within all the different stakeholder groups in order to establish, in a context-specific manner, which stakeholders are vulnerable and in need of protection. The approach also contextualizes the nature of the public good, as part of an ethical consideration of interest trade-offs during evaluations. The discussion is informed by our experiences in African contexts and speaks to the Made in Africa research agenda but is also relevant to other global contexts where alternatives to developed country ontological assumptions about the roles of researchers and participants and the nature of vulnerability are being reconsidered.
International law and the limits of global justice Abstract There are two central theses to this article, the first is that a special kind of governance authority is needed for principles of distributive social justice (social justice from now on) to be applicable to any sphere of human action. The second is that international law does not and cannot represent that kind of governance authority. It is not social justice-apt, in my terminology. This is due to the limits inherent in the statist character of international law, a character that underlies the point and purpose of international law in the first place. Putting these together, one can conclude that international law cannot be used to govern the global order according to those principles of social justice that liberal theorists typically defend in the domestic context. This shows that if the cosmopolitan project of extending social justice to the global arena does not find an alternative form of governance for the international order (the problem of cosmopolitan coordination) it ceases to be a viable project.
The Angular Spectrum of the Scattering Coefficient Map Reveals Subsurface Colorectal Cancer Colorectal cancer diagnosis currently relies on histological detection of endoluminal neoplasia in biopsy specimens. However, clinical visual endoscopy provides no quantitative subsurface cancer information. In this ex vivo study of nine fresh human colon specimens, we report the first use of quantified subsurface scattering coefficient maps acquired by swept-source optical coherence tomography to reveal subsurface abnormities. We generate subsurface scattering coefficient maps with a novel wavelet-based-curve-fitting method that provides significantly improved accuracy. The angular spectra of scattering coefficient maps of normal tissues exhibit a spatial feature distinct from those of abnormal tissues. An angular spectrum index to quantify the differences between the normal and abnormal tissues is derived, and its strength in revealing subsurface cancer in ex vivo samples is statistically analyzed. The study demonstrates that the angular spectrum of the scattering coefficient map can effectively reveal subsurface colorectal cancer and potentially provide a fast and more accurate diagnosis. and Yang et al. calculated the scattering coefficient from human ovary specimens using swept-source OCT 33. All these studies show statistical significance of quantified OCT features. These studies of human tissue provide the foundation for quantitative analysis of OCT A-scan images. Here, we have presented scattering maps using 3-D swept-source OCT images of human colorectal tissues and further extracted features from these scattering maps. In this communication, for the first time, we report an ex vivo study to reveal subsurface abnormities in nine fresh human colon specimens using quantified subsurface scattering coefficient maps acquired by swept-source optical coherence tomography (SSOCT). 
We generate subsurface scattering coefficients with a novel wavelet-based-curve-fitting method with significantly improved accuracy. Scattering coefficient maps are extracted from OCT C-scans providing a visualization of tissue optical properties. A 2-D Fourier transformation generates angular spectrums from scattering coefficient maps. An angular spectrum index (ASI) is derived to quantify the differences between the normal and abnormal tissues, and its strength in revealing subsurface cancer in ex vivo colorectal specimens is statistically analyzed. These preliminary results demonstrate the feasibility of using quantified SS-OCT to identify early mucosal neoplasms within the human colon. Results Generating scattering coefficients with a wavelet-based-curve-fitting method. Based on Beer's law, the scattering coefficients were quantified using SSOCT A-line signals from phantoms with negligible absorption 34 (see Methods). The scattering coefficients of the colorectal samples mostly fell within the range of the phantoms. Four different concentrations of intralipid of 1%, 5%, 10%, and 20% were used as liquid phantoms resulting in four measured scattering coefficients of 0.62 mm −1, 3.22 mm −1, 6.41 mm −1, and 8.03 mm −1 respectively. The measured results of the 1% and 20% intralipid were comparable to available literature data: 0.6 mm −1 for 1% intralipid 35 and 8 mm −1 for 20% intralipid 36. Three OCT datasets were used for quantification: data after wavelet analysis, data after nearby A-line average, and raw data. Three scattering coefficients calculated by the above three methods are compared to the measured scattering coefficient in Fig. 1. Red identifies the measured scattering coefficients which is used as reference scattering coefficient. Surrounding each red data point, scattering coefficients derived from the three methods (average, wavelet, and raw data) are displayed for the same phantom with error bars. 
Among all three methods, wavelet-based analysis (blue) best correlates with the accepted standard measurement value for each of the intralipid phantoms. The average method (gray) overestimates the scattering coefficients, while the raw-data method (purple) underestimates the scattering coefficients; comparisons of each method are provided in Table 1. Based on these results, we selected the wavelet-based-curve-fitting method for estimating scattering coefficients with OCT. It is worth mentioning that the wavelet multiresolution decomposition method is a well-developed speckle noise reduction method. Phantom scattering coefficient measurement. The reference scattering coefficient for each group of intralipid (red bar) is compared to three alternative coefficients calculated by the Near-by-average curve fitting method (gray bar), the Wavelet-based curve fitting method (blue bar), and the Raw data curve fitting method (purple). Error bars represent the standard deviation of each method. The wavelet-based-curve-fitting method (blue bar) best correlates with the accepted standard measurement value for each of the intralipid phantoms. www.nature.com/scientificreports www.nature.com/scientificreports/ OCT images of normal and cancerous specimens. Representative SS-OCT B-scan images of normal colon tissues, cancerous tissues, and corresponding H&E slides are shown in Fig. 2. The OCT and histologic images have the same scale and come from similar, but not identical, locations within the colon specimens. All the images are 0.75 mm wide and 10 mm long. Figures 2a,b are representative OCT B-scan images from two normal specimens. A dentate line structure, which may correlate to the presence of a regular and well-organized crypt pattern in the epithelium, is observed in these images. These serrated edges in the OCT images correspond to the surface morphology shown in their histology results in 2e and 2f. 
However, the cancerous tissues present themselves differently in their OCT images, shown in Fig. 2b,d. Significant surface erosions in cancer OCT Fig. 2c,d could be attributed to cancer invasions of the surface tissue. In the corresponding histology images 2 g and 2 h, the cancerous tissue appears highly irregular compared to the normal specimens, and shows a loss of normal colonic architecture. Scattering coefficient maps of human colorectal specimens. We performed 3-D mapping of the scattering coefficients of the epithelium layer and then derived the ASI of each map, as described in the Methods section. Figures 3 and 4 show scattering coefficient maps from colorectal specimens of three patients and one corresponding histology result. The white areas are regions that are out of focus. www.nature.com/scientificreports www.nature.com/scientificreports/ The en face scattering coefficient maps of the normal colons (Figs 3b and 4d) contain a large area of homogenous scattering coefficients with periodic dot patterns, while the scattering coefficient map of cancer region (Figs 3c and 4e) shows a large area of heterogeneous scattering coefficients. The normal map matches very well with the histological en face crypt structure, which is shown in magnification in Fig. 3d. The average dot-diameter (from eight random dots) is 67.88 m in the scattering map and 69.75 m in the histology, a very close match. The colon samples in Fig. 4 do not have such a close histological comparison because of the pathological process. The scattering distribution relates highly to the cancer's shape and inner structure. Since each colon cancer case can be pathologically different, there are no common features except for highly irregularity in the scattering coefficient maps of cancer tissues. Figure 4f shows the scattering coefficient map of a polyp that has been shown to be precancerous. The periodic pattern can hardly be visualized, and the map shows heterogeneity due to the abnormal growth. 
The mean scattering coefficients for all imaged specimens are shown in Fig. 5. The cancerous specimens' mean scattering coefficients ranged from 4.54 mm −1 to 9.17 mm −1, while the mean scattering coefficients for normal tissues lie between 2.50 mm −1 and 8.21 mm −1. The mean scattering coefficient for the polyp case is 5.31 mm −1. As we can see from Fig. 5, there is overlap between normal and cancer values due to the large variation of scattering coefficients within these tissue types. This observation of heterogeneity within the normal colonic mucosa could be due to different segments of colon imaged, variable amounts of ischemic time or the health status of the patient. Angular spectrum analysis and its strength in revealing subsurface cancer. Nine colon specimens, including eight cancerous regions, five normal regions, and one pre-malignant polyp were successfully imaged and processed. The angular spectrum was acquired by applying 2-D Fourier transform to selected regions of the scattering coefficient maps, with the results shown in the left column of Fig. 6. The right column of Fig. 6 shows ellipses on top of angular spectra for quantification of ASI according to the Methods section. Specifically, we use a Sobel edge detection method to find the area with valid signal, and then fit an ellipse using the least squares criterion (blue ellipses). Red ellipses are ellipses with quarter of the area of blue ellipses. While the blue ellipses include most of the scattering coefficient signals, the red ellipses enclose lower frequency components, corresponding to an inhomogeneous scattering coefficient distribution. Most high-frequency components, corresponding to a homogeneous and periodic scattering coefficient distribution, are outside of the red ellipse. An angular spectrum ring between the red ellipse and blue ellipses can be observed only in the normal cases. 
The angular spectrum ring corresponds to clear separation between the frequency components of the homogeneous and periodic scattering coefficient distribution and the spatial frequency components of the inhomogeneous scattering coefficient distribution. From the histology result (Fig. 3d), we calculated the average spatial frequency to be 12.93 mm −1 ; from the angular spectrum, we derived the average spatial frequency to be 11.89 mm −1. www.nature.com/scientificreports www.nature.com/scientificreports/ Unfortunately, due to the pathological processing, we were not able to obtain histological results for other normal regions of imaged colon specimens. However, we estimate the range to be 9.41 mm −1 to 16.93 mm −1 based on other normal angular spectrums. To measure the ring structure, the ASI was quantified by taking the ratio of the higher frequency components (integration of all signals in between the red and blue ellipse) to all frequency components (integration of all signals within the blue ellipse). This index separates five normal tissues from eight cancer tissues (Fig. 7, p-value www.nature.com/scientificreports www.nature.com/scientificreports/ <<0.001). One polyp, which is a precancerous lesion, is also shown in Fig. 7. It sits between the normal and cancerous tissues, which indicates a gradual structural change and the potential of using ASI to detect early stage colon cancer. Discussion In this pilot study, we evaluated the feasibility of qualitatively and quantitatively differentiating malignancies from normal colon tissue through optical coherence tomography (OCT). Scattering coefficient maps and angular spectrum analysis were calculated from OCT images generated from known malignant and normal tissue immediately after surgical resection. Qualitatively, 3-D scattering coefficient mapping of these specimens suggested unique subsurface microscopic optical scattering patterns that appear to differentiate malignant from normal tissue. 
Specifically, subsurface cancers destroy the homogenous crypt pattern seen in normal tissues and create random distributions. Quantitatively, angular spectra of the scattering maps demonstrate higher frequency components in normal tissues, shown as an angular spectrum ring pattern (Fig. 6). Further ASI quantification reveals the spatial frequency range of the normal crypt pattern, which significantly varies from ASI in cancerous tissues. www.nature.com/scientificreports www.nature.com/scientificreports/ Some highly scattering regions may look heterogeneous in normal scattering coefficient maps due to stronger scattering, e.g., the upper right portion of Fig. 4b. We cropped and visualized this region separately and it shows a periodic pattern similar to normal regions. Moreover, we have quantified the ASI of this cropped area and it lies within normal tissue ranges. (More details in Supplementary Materials.) Based on these findings, our system appears to differentiate organized normal colonic architecture from the irregular heterogeneous areas observed in malignant histology within this limited pilot study. Recent studies have shown that changes in crypt size and appearance are associated with the earliest forms of colorectal cancer 43 ; therefore, OCT's ability to image the mucosal architecture in real time may lead to more sensitive assessment of early malignancies and improved detection of residual malignant tissue after chemotherapy and radiation treatment. We also hypothesize that ASI quantification may be a key, objective tissue characteristic that informs clinical decision-making when evaluating the large bowel for cancer. Several studies have already shown the potential of camera-guided endoscopic OCT. Shen et al. demonstrated colonoscopic OCT for detecting transmural inflammation in inflammatory bowel disease 44, and Zagaynova et al. used camera-guided OCT to detect polyps 45. These studies focused on 2-D B-scan images. 
To reduce the potential for inter-operator variability when interpreting scatter coefficient maps, we hypothesize that 3-D C-scan imaging with real-time ASI quantification may provide objective clinical data that augments endoscopic evaluation. OCT has also been utilized as a correlation method for tissue angiography and microcirculation detection. However, several technical limitations currently reduce the clinical efficacy of the specific system as described. All image post-processing is based on CPU running MATLAB. The total image post-processing time for a 5 mm by 1 cm (500 B-scans, 1000 A-lines/B-scan, and 1024 pixels/A-line) area is twelve hours on a Dell Inspiron 3650 (x64-based, Intel i5-6400 CPU @ 2.70 GHz, 8GB RAM). To produce clinically relevant results, however, image processing and ASI quantification must be achieved in significantly shorter amounts of time. Future system improvements will therefore focus on GPU implementation and algorithm optimization to improve computational speed and accuracy. Then, in vivo study of system performance will be undertaken in an appropriately powered study to evaluate the clinical efficacy of this promising technology. The data presented here suggest that OCT imaging may produce qualitative and quantitative information that differentiates malignant from normal tissue in the human colon. After computational improvements and further testing, this system may augment traditional endoscopy when screening the large bowel for occult early malignancies or residual nests of cancer cells following initial oncologic therapy. Though promising, these preliminary results therefore warrant further study. Specifically, future efforts must focus on increasing the image processing speed and further evaluation of the scattering coefficient map and ASI quantification patterns in vivo. 
Conclusion We report the use of swept-source optical coherence tomography and a novel quantitative characteristic to differentiate malignant from normal tissue in nine fresh human colon specimens. Subsurface scattering coefficient maps were generated with a wavelet-based curve fitting method, and angular spectrum indices (ASI) were calculated for each imaged specimen. We found significant qualitative and quantitative differences between normal and malignant tissue. Among this limited sample, we demonstrated that the ASI varies significantly between normal and malignant tissue. While further system optimization and clinical testing are required, we conclude that SS-OCT may provide new diagnostic information when screening for early cancers or surveilling known disease following oncologic therapy. Future work will include system optimization to reduce image processing time, construction of an endoscopic device for further testing, and performance of an appropriately powered in vivo study to refine the accuracy of our system. Method and Materials In this section, we describe the colon specimen preparation, the SS-OCT system, the novel wavelet-basedcurve-fitting verification, the scattering coefficient mapping generation, the angular spectrum, and the ASI calculation. University School of Medicine were recruited in our initial study. From these patients' operative specimens, we imaged and processed eight cancers, one pre-malignant polyp, and five representative areas with no gross abnormality. For each image of a specimen, we selected an area 10 mm 20 mm and processed a region of interest for 3-D mapping of the scattering coefficients. This study was approved by the Institutional Review Board of Washington University School of Medicine, and informed consent was obtained from all patients. All samples were imaged immediately upon resection, prior to fixation in formalin. All methods were performed in accordance with the relevant guidelines and regulations. 
OCT System Setup. The SS-OCT system (details see Supplementary Material) is based on a swept source (HSL-2000, Santec Corp., Japan) with a 1310 nm center wavelength, 110 nm full width at half maximum bandwidth, and 20 kHz scan rate. The interference signal was detected by a balanced detector (Thorlabs PDB450C) and sent to a data acquisition board (ATS9462, Alazartec Technologies Inc). The lateral resolution of the system in air was 10 m, and the axial resolution was 6 m. To balance the effects of system signal-to-noise ratio roll-off and Gaussian beam focusing, we performed a calibration test by measuring attenuated mirror signals from different imaging depths. Scattering Coefficient Mapping. The scattering coefficient within the colon epithelium layer was calculated by fitting each A-scan with a single attenuation model based on Beer's law : where i(z) is the OCT signal and the factor of 2 accounts for the round-trip attenuation. t = a + s is the total absorption coefficient, which is the summation of the absorption coefficient a and the scattering coefficient s. Since in soft tissue a is much less than s, the fitted t was used as a good approximation of s. We semi-automatically located the colon surface (for details, see Supplementary Materials) and then added a thickness to obtain the epithelium region. The area between the two red curves in Fig. 8a,b identifies the colonic epithelium layer, and the curve fitting for one A-line from the de-noised signal is shown in Fig. 8c,d. All A-lines within a B-scan are fitted. Afterwards we performed this fitting to consecutive B-scans, then generated an en face scattering coefficient map of the processed area. Data De-noising and Method Verification. In this study, we compared fitted scattering coefficients to the measurement results from data after wavelet analysis, data after applying the nearby average method, and raw data. We used a plug-in, developed by Marco Rossini, in Gimp 2.0 to perform wavelet analysis. 
To verify the accuracy of the fitted result, a phantom-based experiment was conducted. Intralipid 20% (manufactured by Fresenius Kabi, Uppsala, Sweden for Baxter Healthcare), and deionized water were used for preparing liquid phantoms with different scattering coefficients. Four different concentrations of intralipid of 1%, 5%, 10%, and 20% were used as liquid phantoms. Then we measured the extinction coefficient, based on I(z) = I exp(− t z). The experiment schematic is based on Flock et al. 's setup 53. We first measured the light intensity after it was collimated, passed through the cuvette without liquid, passed through two apertures to block forward scattered light and focused by a lens. This intensity was used as I. Then we filled the hole with liquid phantom and performed the measurement again. This intensity was recorded as I(z). After substituting these values, we obtained the scattering coefficient. We used the mean of the measurements as our standard. Afterwards we used SS-OCT to scan the phantoms and fitted the scattering coefficients using nearby averaged data, wavelet-analyzed data, and raw data, respectively. www.nature.com/scientificreports www.nature.com/scientificreports/ Angular spectrum. One new image feature, extracted from the scattering map, is based on the observed differences between the normal and abnormal tissue scattering maps in terms of the spatial distributions. We first cropped the scattering maps to avoid out of focus or hyper-reflection areas. Then 2-D Fourier transform was used to reveal the angular spectrum of these maps (Fig. 6a-d). A Sobel edge detection was performed in MATLAB to acquire a region with valid frequency information, and the border of the detected edges were fitted to an ellipse using the least squares criterion. The results are shown as blue ellipses in Fig. 6e-h. The ring structure observed in normal tissues represents the periodic crypt pattern. 
The spatial frequency is derived as where f major stands for the spatial frequency of the major axis and f minor stands for the spatial frequency of the minor axis. To further quantify the ASI, ellipse with a quarter of the area of the blue ellipse was used to depict the signal focused in the center region, which corresponds to the red ellipses in Fig. 6e-h. (See Supplementary Materials for more details). Then, to identify the unique ring structure, we defined the ASI as how much signal is outside the center: ASI all inner all all integration of all signals within the blue ellipses inner integration of all signals within the red ellipses We then took the average ASI of all cropped areas from one scattering map as the ASI of this map. Data analysis. Statistical analysis was performed using MATLAB R2016a. The ASIs of normal and cancerous tissues were compared using student's t-test, and p < 0.05 was considered statistically significant. Data Availability The data that support the plots within this paper and other findings of this study are available from the corresponding author upon reasonable request.
import axios from 'axios';

/**
 * Helpers for implementing AWS CloudFormation custom resource handlers:
 * a typed reader for the incoming event (`Request`) and a writer that
 * reports the result to the pre-signed response URL (`Response`).
 */
export namespace CustomResource {
  /** Shape of the CloudFormation custom resource event payload. */
  export interface EventProps {
    RequestType: string;
    ServiceToken: string;
    ResponseURL: string;
    StackId: string;
    RequestId: string;
    LogicalResourceId: string;
    PhysicalResourceId: string;
    ResourceType: string;
    ResourceProperties: {
      [key: string]: any;
    };
    OldResourceProperties?: {
      [key: string]: any;
    };
  }

  /** Read-only accessor over the raw custom resource event. */
  export class Request {
    public readonly _event: EventProps;

    constructor(event: { [key: string]: any }) {
      this._event = event as EventProps;
    }

    /**
     * True when the event's RequestType matches one of the given types,
     * case-insensitively — e.g. `on('Create')` or `on(['Create', 'Update'])`.
     */
    public on(types: string | string[]): boolean {
      const wanted = typeof types === 'string' ? [types] : types;
      const actual = this._event.RequestType.toLowerCase();
      return wanted.some(type => type.toLowerCase() === actual);
    }

    /** The raw RequestType string (e.g. 'Create' | 'Update' | 'Delete'). */
    public requestType(): string {
      return this._event.RequestType;
    }

    /** Current resource properties; empty object when absent. */
    public properties(): { [key: string]: string } {
      return this._event.ResourceProperties ?? {};
    }

    /** A single current property value, or null when missing. */
    public property(key: string): string | null {
      return this.properties()[key] ?? null;
    }

    /** Previous properties (present on Update events); empty object when absent. */
    public oldProperties(): { [key: string]: string } {
      return this._event.OldResourceProperties ?? {};
    }

    /** A single previous property value, or null when missing. */
    public oldProperty(key: string): string | null {
      return this.oldProperties()[key] ?? null;
    }
  }

  /** Sends SUCCESS/FAILED results back to CloudFormation's response URL. */
  export class Response {
    private readonly _event: EventProps;

    constructor(event: { [key: string]: any }) {
      this._event = event as EventProps;
    }

    /** Report success, attaching `data` as the resource's output attributes. */
    public async success(data: { [key: string]: string }): Promise<{}> {
      return this.send(data, null);
    }

    /** Report failure; the error's string form becomes the Reason field. */
    public async failed(error: Error): Promise<{}> {
      return this.send(null, error);
    }

    /**
     * PUT the CloudFormation response document to the pre-signed URL.
     * Falls back to the RequestId when no PhysicalResourceId was supplied
     * (as happens on Create events).
     */
    public send(data: { [key: string]: string } | null, error: Error | null): Promise<{}> {
      const responseBody = JSON.stringify({
        Status: error ? 'FAILED' : 'SUCCESS',
        Reason: error ? error.toString() : 'Success',
        PhysicalResourceId: this._event.PhysicalResourceId ?? this._event.RequestId,
        StackId: this._event.StackId,
        RequestId: this._event.RequestId,
        LogicalResourceId: this._event.LogicalResourceId,
        Data: data,
      });

      return axios.put(this._event.ResponseURL, responseBody);
    }
  }
}
The Bucs plan to release running back Doug Martin and defensive tackle Chris Baker sometime before the NFL scouting combine, which begins Feb. 27 in Indianapolis. There likely will be more, but they still are undecided on the future of defensive ends Robert Ayers and William Gholston. The team also is expected to exercise club options for 2018 on the contracts of safety Chris Conte, tackle Demar Dotson, defensive back Josh Robinson and center Joe Hawley. There are a lot of personnel decisions looming in the next few weeks for general manager Jason Licht and coach Dirk Koetter. These are fluid discussions, but here's where the Bucs are with some of their major decisions. Martin failed to rush for 500 yards in four of the past five seasons. Over the past two seasons, he missed four games serving a suspension for violating the league's policy on performance-enhancing drugs and was inactive one game last year for violating an undisclosed team rule. Martin signed a five-year, $35.75-million contract in 2016, but his base salary of $6.75 million is not guaranteed in 2018, and releasing the 29-year-old running back would represent significant savings. After signing a three-year, $15.75-million contract last March, Baker underperformed in nearly every area. He had 33 tackles and a half sack. More damning, nobody liked his attitude. It came to a boil after a Christmas Eve loss at Carolina when his critical encroachment penalty on fourth down led to a locker room confrontation with teammates. Baker was scheduled to earn a base salary of $4.875 million in 2018. None of it is guaranteed. Reason for cutting ties: Ayers is 32 and had only two sacks last season. It's a big reason why the Bucs were last in the NFL with 22 sacks. In the past two seasons, Ayers has missed a total of eight games with various injuries. Ayers has a $1 million roster bonus due later next month and a $5 million base salary that is not guaranteed. Releasing him would save $6 million on the salary cap. 
Reason for bringing him back in 2018: The Bucs already need a pretty complete rebuild on the defensive line. DTs Clinton McDonald, Sealver Siliga and DE Will Clarke all are unrestricted free agents. If you can swallow the salary, Ayers gives you a veteran presence for what is sure to be an influx of young talent on the defensive line. Despite his array of injuries, only Gerald McCoy played more snaps. Ayers also had 45 quarterback pressures, including 17 hits and 26 hurries, according to Pro Football Focus. Reason for cutting ties: After signing a five-year, $27.5-million contract last March, Gholston had only 36 tackles and no sacks. He is owed a base salary of $6.5 million for 2018. Reason for bringing him back: Gholston wasn't signed for his pass rush ability. He is only 26 and he's good against the run. The salary isn't prohibitive for a team that could be more than $70 million under the salary cap. Coming off one of his best seasons, Dotson missed the final four games with a knee injury. Dotson signed a unique deal in which the team has until March 14 to exercise its option. He has a $750,000 roster bonus for 2018. With a cap figure at $4.615-million, Dotson still is a relative bargain. The Bucs aren't deep at safety. T.J. Ward is not a player the Bucs want to return. Keith Tandy also is an unrestricted free agent. Conte had 77 tackles, three forced fumbles and an INT. He has a $250,000 bonus due on March 18 if they exercise the club option. His $2.25-million base salary is reasonable for a part-time starter. One of the Bucs' premier players on special teams, Robinson battled injuries (concussion/hamstring) in 2017. But he will get to finish the two-year, $5-million contract signed last March as the team is expected to pay a $250,000 bonus to exercise a club option for 2018. Hawley was inactive for much of the season until the injury to center Ali Marpet forced him into action. 
With Evan Smith expected to become a free agent, he's a versatile player to have around with starting experience and only $2.62 million against the salary cap. Bucs will likely pay his $250,000 bonus to exercise the one-year club option.
Wendy's is embroiled in a battle over fresh beef patties after the fast food chain came after McDonald's on Twitter. McDonald's announced in a tweet that it would use fresh beef in its Quarter Pounder burgers in most of its restaurants by mid-2018. Wendy's tweeted in response: "So you'll still use frozen beef in MOST of your burgers in ALL of your restaurants? Asking for a friend." @Wendys Tweet: So you'll still use frozen beef in MOST of your burgers in ALL of your restaurants? Asking for a friend." McDonald's has so far not yet responded to the tweet from Wendy's. Wendy's CEO Todd Penegor said Monday that the fast food chain is being a "challenger with charm" and is simply defending its territory. "If you think about our brand, right, we have been created on fresh never frozen North American beef since 1969," Penegor said on CNBC's "Power Lunch." "We have been serving fresh beef in all of our restaurants in all of our hamburgers for almost 48 years." Credit Suisse analysts Jason West and Jordy Winslow said the push by McDonald's into the fresh beef territory could improve the company's same-store sales. They said 15 percent of those gains could be snatched from Wendy's, thus hurting the fast food chain's same-store sales by 30 basis points. Penegor said he is not worried. He said he sees his rival's announcement as a chance to allow more customers to recognize them as the originals in the fresh beef patty space. "Today about 3 in 10 consumers really understand that Wendy's is fresh never frozen," Penegor said. "We have a great opportunity to amplify that message."
Very early changes in circulating T3 and rT3 during development of metabolic derangement in diabetic patients. Alterations in circulating iodothyronines were studied in 15 juvenile type diabetic patients during the development of metabolic derangement after withdrawal of insulin. By means of measurements of circulating C peptide, one group of patients with and one without residual beta-cell function had been selected. In both groups there was a gradual decrease in serum T3 during the 12-hour period studied after withdrawal of insulin, while an increase in serum rT3 was observed after 4-6 hours. The alterations in serum T3 and the metabolic derangement were significantly more pronounced in patients without than with residual beta-cell function.
<filename>darwin/state.go package darwin // State ... type State int // State ... const ( StateUnknown State = 0 StateResetting State = 1 StateUnsupported State = 2 StateUnauthorized State = 3 StatePoweredOff State = 4 StatePoweredOn State = 5 ) func (s State) String() string { str := []string{ "Unknown", "Resetting", "Unsupported", "Unauthorized", "PoweredOff", "PoweredOn", } return str[int(s)] }
<gh_stars>0 #include "pch.h" #include "FS_VirtualAddress.h" GxDirect::FeaturSupport::FS_VirtualAddress::FS_VirtualAddress(GxDirect::XContext* ptrContext) { // Get device ID3D12Device* ptrDevice; ptrContext->getDevice(&ptrDevice); // Create feature query D3D12_FEATURE_DATA_GPU_VIRTUAL_ADDRESS_SUPPORT featureData; ZeroMemory(&featureData, sizeof(D3D12_FEATURE_DATA_GPU_VIRTUAL_ADDRESS_SUPPORT)); // Query for featue support if (SUCCEEDED(m_hrQueryResult = ptrDevice->CheckFeatureSupport(D3D12_FEATURE_GPU_VIRTUAL_ADDRESS_SUPPORT, &featureData, sizeof(D3D12_FEATURE_DATA_GPU_VIRTUAL_ADDRESS_SUPPORT)))) { // Set values m_siMaxGPUVirtualAddressBitsPerResource = featureData.MaxGPUVirtualAddressBitsPerResource; m_siMaxGPUVirtualAddressBitsPerProcess = featureData.MaxGPUVirtualAddressBitsPerProcess; } // Release device COM_RELEASE(ptrDevice); } HRESULT GxDirect::FeaturSupport::FS_VirtualAddress::getQueryResult() { return m_hrQueryResult; } void GxDirect::FeaturSupport::FS_VirtualAddress::gennerateTextRepresentation() { #if defined(SHIPPING) throw EXEPTION(L"gennerateTextRepresentation(...) 
is not supported in shipping configuration"); return; #else // String builder std::stringstream ss; // MaxGPUVirtualAddressBitsPerResource m_siMaxGPUVirtualAddressBitsPerResource.strName = "MaxGPUVirtualAddressBitsPerResource"; ss << m_siMaxGPUVirtualAddressBitsPerResource.data; m_siMaxGPUVirtualAddressBitsPerResource.strValue = ss.str(); // MaxGPUVirtualAddressBitsPerProcess m_siMaxGPUVirtualAddressBitsPerProcess.strName = "MaxGPUVirtualAddressBitsPerProcess"; ss.str(""); ss << m_siMaxGPUVirtualAddressBitsPerProcess.data; m_siMaxGPUVirtualAddressBitsPerProcess.strValue = ss.str(); #endif } const TSysInfo(UINT)& GxDirect::FeaturSupport::FS_VirtualAddress::getMaxGPUVirtualAddressBitsPerResource() { return m_siMaxGPUVirtualAddressBitsPerResource; } const TSysInfo(UINT)& GxDirect::FeaturSupport::FS_VirtualAddress::getMaxGPUVirtualAddressBitsPerProcess() { return m_siMaxGPUVirtualAddressBitsPerProcess; }