code
stringlengths 67
466k
| docstring
stringlengths 1
13.2k
|
---|---|
private static LocalizationContext findMatch(PageContext pageContext,
        String basename) {
    /*
     * Walk the client's preferred locales (most- to least-preferred, as
     * reported by the request) and return a localization context for the
     * first one that has a matching resource bundle; null if none match.
     */
    Enumeration locales = Util.getRequestLocales(
            (HttpServletRequest) pageContext.getRequest());
    while (locales.hasMoreElements()) {
        Locale preferred = (Locale) locales.nextElement();
        ResourceBundle bundle = findMatch(basename, preferred);
        if (bundle != null) {
            return new LocalizationContext(bundle, preferred);
        }
    }
    return null;
} | Determines the client's preferred locales from the request, and compares
each of the locales (in order of preference) against the available
locales in order to determine the best matching locale.
@param pageContext the page in which the resource bundle with the given
base name is requested @param basename the resource bundle's base name
@return the localization context containing the resource bundle with the
given base name and best matching locale, or <tt>null</tt> if no
resource bundle match was found |
/**
 * Calls ResourceBundle.getBundle(basename, pref, contextClassLoader) and
 * returns the result only if its locale is an exact or language-level
 * match for the preferred locale; returns null otherwise, or when no
 * bundle with the given base name exists at all.
 */
private static ResourceBundle findMatch(String basename, Locale pref) {
    ResourceBundle match = null;
    try {
        ResourceBundle bundle = ResourceBundle.getBundle(basename, pref,
                Thread.currentThread().getContextClassLoader());
        Locale avail = bundle.getLocale();
        if (pref.equals(avail)) {
            // Exact match
            match = bundle;
        } else {
            /*
             * We have to make sure that the match we got is for the
             * specified locale. The way ResourceBundle.getBundle() works,
             * if a match is not found with (1) the specified locale, it
             * tries to match with (2) the current default locale as
             * returned by Locale.getDefault() or (3) the root resource
             * bundle (basename). We must ignore any match that could have
             * worked with (2) or (3). So if an exact match is not found, we
             * make the following extra tests: - avail locale must be equal
             * to preferred locale - avail country must be empty or equal to
             * preferred country (the equality match might have failed on
             * the variant)
             */
            if (pref.getLanguage().equals(avail.getLanguage())
                    && ("".equals(avail.getCountry()) || pref.getCountry()
                            .equals(avail.getCountry()))) {
                /*
                 * Language match. By making sure the available locale does
                 * not have a country and matches the preferred locale's
                 * language, we rule out "matches" based on the container's
                 * default locale. For example, if the preferred locale is
                 * "en-US", the container's default locale is "en-UK", and
                 * there is a resource bundle (with the requested base name)
                 * available for "en-UK", ResourceBundle.getBundle() will
                 * return it, but even though its language matches that of
                 * the preferred locale, we must ignore it, because matches
                 * based on the container's default locale are not portable
                 * across different containers with different default
                 * locales.
                 */
                match = bundle;
            }
        }
    } catch (MissingResourceException mre) {
        // No bundle with the given base name exists at all; deliberately
        // swallowed — "no match" is reported by returning null.
    }
    return match;
} | Gets the resource bundle with the given base name and preferred locale.
This method calls java.util.ResourceBundle.getBundle(), but ignores its
return value unless its locale represents an exact or language match with
the given preferred locale.
@param basename the resource bundle base name @param pref the preferred
locale
@return the requested resource bundle, or <tt>null</tt> if no resource
bundle with the given base name exists or if there is no exact- or
language-match between the preferred locale and the locale of the bundle
returned by java.util.ResourceBundle.getBundle(). |
/**
 * Adds every element of {@code values} as a query parameter under
 * {@code name}. A null (or empty) collection leaves the request untouched.
 */
public RestRequestBuilder<B, R> addQueryParam( String name, Collection<?> values ) {
    if ( values == null ) {
        return this;
    }
    getQueryParams( name ).addAll( values );
    return this;
} | Add a query parameter. If a null or empty collection is passed, the param is ignored.
@param name Name of the parameter
@param values Values of the parameter
@return this builder |
/**
 * Adds every element of {@code values} as a query parameter under
 * {@code name}. A null (or empty) array leaves the request untouched.
 */
public RestRequestBuilder<B, R> addQueryParam( String name, Object[] values ) {
    if ( values == null ) {
        return this;
    }
    List<Object> existing = getQueryParams( name );
    for ( int i = 0; i < values.length; i++ ) {
        existing.add( values[i] );
    }
    return this;
} | Add a query parameter. If a null or empty array is passed, the param is ignored.
@param name Name of the parameter
@param values Values of the parameter
@return this builder |
/**
 * Streaming Base64 encode step: buffers input bytes three at a time in
 * bitWorkArea and emits four 6-bit characters per triple. Call once more
 * with inAvail == -1 to flush the final 1- or 2-byte remainder (padded
 * with '=' only for the standard alphabet) and, when chunking, a closing
 * line separator.
 */
@Override
void encode(byte[] in, int inPos, int inAvail) {
    if (eof) {
        return;
    }
    // inAvail < 0 is how we're informed of EOF in the underlying data we're
    // encoding.
    if (inAvail < 0) {
        eof = true;
        if (0 == modulus && lineLength == 0) {
            return; // no leftovers to process and not using chunking
        }
        ensureBufferSize(encodeSize);
        int savedPos = pos;
        switch (modulus) { // 0-2
        case 1: // 8 bits = 6 + 2
            buffer[pos++] = encodeTable[(bitWorkArea >> 2) & MASK_6BITS]; // top 6 bits
            buffer[pos++] = encodeTable[(bitWorkArea << 4) & MASK_6BITS]; // remaining 2
            // URL-SAFE skips the padding to further reduce size.
            if (encodeTable == STANDARD_ENCODE_TABLE) {
                buffer[pos++] = PAD;
                buffer[pos++] = PAD;
            }
            break;
        case 2: // 16 bits = 6 + 6 + 4
            buffer[pos++] = encodeTable[(bitWorkArea >> 10) & MASK_6BITS];
            buffer[pos++] = encodeTable[(bitWorkArea >> 4) & MASK_6BITS];
            buffer[pos++] = encodeTable[(bitWorkArea << 2) & MASK_6BITS];
            // URL-SAFE skips the padding to further reduce size.
            if (encodeTable == STANDARD_ENCODE_TABLE) {
                buffer[pos++] = PAD;
            }
            break;
        }
        currentLinePos += pos - savedPos; // keep track of current line position
        // if currentPos == 0 we are at the start of a line, so don't add CRLF
        if (lineLength > 0 && currentLinePos > 0) {
            System.arraycopy(lineSeparator, 0, buffer, pos, lineSeparator.length);
            pos += lineSeparator.length;
        }
    } else {
        for (int i = 0; i < inAvail; i++) {
            ensureBufferSize(encodeSize);
            modulus = (modulus + 1) % BYTES_PER_UNENCODED_BLOCK;
            int b = in[inPos++];
            if (b < 0) {
                b += 256; // normalize signed byte to 0..255
            }
            bitWorkArea = (bitWorkArea << 8) + b; // BITS_PER_BYTE
            if (0 == modulus) { // 3 bytes = 24 bits = 4 * 6 bits to extract
                buffer[pos++] = encodeTable[(bitWorkArea >> 18) & MASK_6BITS];
                buffer[pos++] = encodeTable[(bitWorkArea >> 12) & MASK_6BITS];
                buffer[pos++] = encodeTable[(bitWorkArea >> 6) & MASK_6BITS];
                buffer[pos++] = encodeTable[bitWorkArea & MASK_6BITS];
                currentLinePos += BYTES_PER_ENCODED_BLOCK;
                // Emit the configured line separator once a full line is reached.
                if (lineLength > 0 && lineLength <= currentLinePos) {
                    System.arraycopy(lineSeparator, 0, buffer, pos, lineSeparator.length);
                    pos += lineSeparator.length;
                    currentLinePos = 0;
                }
            }
        }
    }
} | <p>
Encodes all of the provided data, starting at inPos, for inAvail bytes. Must be called at least twice: once with the data
to encode, and once with inAvail set to "-1" to alert encoder that EOF has been reached, so flush last remaining bytes
(if not multiple of 3).
</p>
<p>
Thanks to "commons" project in ws.apache.org for the bitwise operations, and general approach.
http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
</p>
@param in byte[] array of binary data to base64 encode.
@param inPos Position to start reading data from.
@param inAvail Amount of bytes available from input for encoding. |
/**
 * Streaming Base64 decode step: accumulates 6-bit values from recognized
 * alphabet bytes into bitWorkArea and writes 3 decoded bytes per 4 input
 * characters. EOF is signalled either by inAvail == -1 or by the first
 * PAD ('=') byte; any 12- or 18-bit remainder is then flushed (a lone
 * 6-bit remainder is discarded).
 */
@Override
void decode(byte[] in, int inPos, int inAvail) {
    if (eof) {
        return;
    }
    if (inAvail < 0) {
        eof = true;
    }
    for (int i = 0; i < inAvail; i++) {
        ensureBufferSize(decodeSize);
        byte b = in[inPos++];
        if (b == PAD) {
            // We're done.
            eof = true;
            break;
        } else {
            // Bytes outside the decode table (e.g. CR/LF in chunked input)
            // are silently skipped.
            if (b >= 0 && b < DECODE_TABLE.length) {
                int result = DECODE_TABLE[b];
                if (result >= 0) {
                    modulus = (modulus + 1) % BYTES_PER_ENCODED_BLOCK;
                    bitWorkArea = (bitWorkArea << BITS_PER_ENCODED_BYTE) + result;
                    if (modulus == 0) {
                        buffer[pos++] = (byte) ((bitWorkArea >> 16) & MASK_8BITS);
                        buffer[pos++] = (byte) ((bitWorkArea >> 8) & MASK_8BITS);
                        buffer[pos++] = (byte) (bitWorkArea & MASK_8BITS);
                    }
                }
            }
        }
    }
    // Two forms of EOF as far as base64 decoder is concerned: actual
    // EOF (-1) and first time '=' character is encountered in stream.
    // This approach makes the '=' padding characters completely optional.
    if (eof && modulus != 0) {
        ensureBufferSize(decodeSize);
        // We have some spare bits remaining
        // Output all whole multiples of 8 bits and ignore the rest
        switch (modulus) {
        // case 1: // 6 bits - ignore entirely
        // break;
        case 2: // 12 bits = 8 + 4
            bitWorkArea = bitWorkArea >> 4; // dump the extra 4 bits
            buffer[pos++] = (byte) ((bitWorkArea) & MASK_8BITS);
            break;
        case 3: // 18 bits = 8 + 8 + 2
            bitWorkArea = bitWorkArea >> 2; // dump 2 bits
            buffer[pos++] = (byte) ((bitWorkArea >> 8) & MASK_8BITS);
            buffer[pos++] = (byte) ((bitWorkArea) & MASK_8BITS);
            break;
        }
    }
} | <p>
Decodes all of the provided data, starting at inPos, for inAvail bytes. Should be called at least twice: once with the
data to decode, and once with inAvail set to "-1" to alert decoder that EOF has been reached. The "-1" call is not
necessary when decoding, but it doesn't hurt, either.
</p>
<p>
Ignores all non-base64 characters. This is how chunked (e.g. 76 character) data is handled, since CR and LF are silently
ignored, but has implications for other bytes, too. This method subscribes to the garbage-in, garbage-out philosophy: it
will not check the provided data for validity.
</p>
<p>
Thanks to "commons" project in ws.apache.org for the bitwise operations, and general approach.
http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/
</p>
@param in byte[] array of ascii data to base64 decode.
@param inPos Position to start reading data from.
@param inAvail Amount of bytes available from input for decoding. |
/**
 * Base64-encodes the byte representation of the given integer (as produced
 * by toIntegerBytes), for use with crypto standards such as W3C's
 * XML-Signature.
 *
 * @param bigInt the integer to encode; must not be null
 * @return base64 character data
 * @throws NullPointerException if {@code bigInt} is null
 */
public static byte[] encodeInteger(BigInteger bigInt) {
    if (null == bigInt) {
        throw new NullPointerException("encodeInteger called with null parameter");
    }
    return encodeBase64(toIntegerBytes(bigInt), false);
} | Encodes to a byte64-encoded integer according to crypto standards such as W3C's XML-Signature
@param bigInt a BigInteger
@return A byte array containing base64 character data
@throws NullPointerException if null is passed in
@since 1.4 |
/** Adds a URI filter that matches only an exact string equality with {@code value}. */
@Override
public HttpFilterBuilder equal(final String value) {
    Matcher<String> exactMatcher = new Matcher<String>() {
        @Override
        public boolean matches(String candidate) {
            return candidate.equals(value);
        }

        @Override
        public String toString() {
            return String.format("equal('%s')", value);
        }
    };
    return addUriRequestFilter(exactMatcher);
} | {@inheritDoc} |
/** Adds a method filter that matches only the given {@link HttpMethod}. */
@Override
public HttpFilterBuilder equal(final HttpMethod value) {
    Matcher<HttpMethod> methodMatcher = new Matcher<HttpMethod>() {
        @Override
        public boolean matches(HttpMethod candidate) {
            return candidate.equals(value);
        }

        @Override
        public String toString() {
            return String.format("method(%s)", value);
        }
    };
    return addMethodRequestFilter(methodMatcher);
} | {@inheritDoc} |
/**
 * Strips every curly brace that is nested inside another brace pair (outermost
 * braces are kept). Nested braces are first overwritten with a sentinel
 * character and then filtered out while rebuilding the string.
 */
private String removeEnclosedCurlyBraces( String str ) {
    // Unlikely-to-occur control character used to mark braces for removal.
    final char marker = 6;
    char[] work = str.toCharArray();
    int depth = 0;
    for ( int i = 0; i < work.length; i++ ) {
        if ( work[i] == '{' ) {
            if ( depth != 0 ) {
                work[i] = marker;
            }
            depth++;
        }
        else if ( work[i] == '}' ) {
            depth--;
            if ( depth != 0 ) {
                work[i] = marker;
            }
        }
    }
    StringBuilder out = new StringBuilder( work.length );
    for ( char c : work ) {
        if ( c != marker ) {
            out.append( c );
        }
    }
    return out.toString();
} | Enclosed curly braces cannot be matched with a regex. Thus we remove them before applying the replaceAll method |
/**
 * Starts a Warp execution for the given client activity; fails fast when the
 * Warp runtime has not been initialized.
 *
 * @param activity the client activity that should cause a server request
 * @return a builder for the Warp execution
 * @throws IllegalStateException when the Warp runtime is not initialized
 */
public static WarpExecutionBuilder initiate(Activity activity) {
    final WarpRuntime runtime = WarpRuntime.getInstance();
    if (runtime != null) {
        return runtime.getWarpActivityBuilder().initiate(activity);
    }
    throw new IllegalStateException(
        "The Warp runtime isn't initialized. You need to make sure arquillian-warp-impl is on classpath and annotate a test class with @WarpTest in order to initialize Warp.");
} | Takes client activity which should be performed in order to cause server request.
@param activity the client activity to execute
@return {@link WarpActivityBuilder} instance |
/**
 * Adds a filter that matches when the named header equals {@code value};
 * a missing header matches only a null expected value.
 */
@Override
public HttpFilterBuilder equal(final String name, final String value) {
    return addFilter(new RequestFilter<HttpRequest>() {
        @Override
        public boolean matches(HttpRequest request) {
            String actual = request.getHeader(name);
            if (actual == null) {
                return value == null;
            }
            return actual.equals(value);
        }

        @Override
        public String toString() {
            return String.format("header.equal('%s', '%s')", name, value);
        }
    });
} | {@inheritDoc} |
/** Adds a filter that matches any request carrying the named header. */
@Override
public HttpFilterBuilder containsHeader(final String name) {
    RequestFilter<HttpRequest> presenceFilter = new RequestFilter<HttpRequest>() {
        @Override
        public boolean matches(HttpRequest candidate) {
            return candidate.containsHeader(name);
        }

        @Override
        public String toString() {
            return String.format("containsHeader('%s')", name);
        }
    };
    return addFilter(presenceFilter);
} | {@inheritDoc} |
/**
 * Adds a filter that matches when any value of the named header equals
 * {@code value}.
 */
@Override
public HttpFilterBuilder containsValue(final String name, final String value) {
    return addFilter(new RequestFilter<HttpRequest>() {
        @Override
        public boolean matches(HttpRequest request) {
            List<String> values = request.getHeaders(name);
            for (String val : values) {
                // BUG FIX: previously compared each header value against the
                // whole list ("val.equals(values)"), which is always false for
                // a String, so this filter could never match. Compare against
                // the expected value instead.
                if (val.equals(value)) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public String toString() {
            return String.format("containsValue('%s', '%s')", name, value);
        }
    });
} | {@inheritDoc} |
/**
 * Reads a 32-bit little-endian word (least-significant byte first) and
 * returns it as an int.
 *
 * @return the integer value
 * @throws IOException if the underlying read fails
 */
public int readInt() throws IOException {
    int b0 = read() & 0xff;
    int b1 = read() & 0xff;
    int b2 = read() & 0xff;
    int b3 = read() & 0xff;
    return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
} | Read 32bit word as integer.
@return the integer value
@throws IOException |
/**
 * Reads {@code length} consecutive 32-bit words.
 *
 * @param length number of 32-bit words to read (not bytes)
 * @return the array of words
 * @throws IOException if the underlying read fails
 */
public int[] readIntArray(int length) throws IOException {
    final int[] words = new int[length];
    for (int idx = 0; idx < words.length; idx++) {
        words[idx] = readInt();
    }
    return words;
} | Read an array of 32bit words.
@param length
size of the array (in 32bit word count, not byte count)
@return the array of 32bit words
@throws IOException |
/**
 * Reads {@code length} consecutive bytes.
 *
 * @param length number of bytes to read
 * @return the array of bytes
 * @throws IOException if the underlying read fails
 */
public byte[] readByteArray(int length) throws IOException {
    final byte[] bytes = new byte[length];
    for (int idx = 0; idx < bytes.length; idx++) {
        bytes[idx] = (byte) read();
    }
    return bytes;
} | Read an array of bytes.
@param length
size of the array (in bytes)
@return the array of bytes
@throws IOException |
/**
 * Formats the tag's Joda-Time value (a ReadableInstant or ReadablePartial)
 * using the configured pattern/style (medium date style by default),
 * locale and time zone, then either stores the result under {@code var}
 * in the given scope or prints it to the page output.
 *
 * @return EVAL_PAGE in all cases
 * @throws JspException if the value has an unsupported type, or wrapping
 *             an IOException raised while writing to the JSP output
 */
public int doEndTag() throws JspException {
    // A null value clears any previously exported variable and emits nothing.
    if (value == null) {
        if (var != null) {
            pageContext.removeAttribute(var, scope);
        }
        return EVAL_PAGE;
    }
    // Create formatter
    DateTimeFormatter formatter;
    if (pattern != null) {
        formatter = DateTimeFormat.forPattern(pattern);
    } else if (style != null) {
        formatter = DateTimeFormat.forStyle(style);
    } else {
        // use a medium date (no time) style by default; same as jstl
        formatter = DateTimeFormat.mediumDate();
    }
    // set formatter locale
    Locale locale = this.locale;
    if (locale == null) {
        locale = Util.getFormattingLocale(pageContext, this, true,
                DateFormat.getAvailableLocales());
    }
    if (locale != null) {
        formatter = formatter.withLocale(locale);
    }
    // set formatter timezone
    DateTimeZone tz = this.dateTimeZone;
    if (tz == null) {
        tz = DateTimeZoneSupport.getDateTimeZone(pageContext, this);
    }
    if (tz != null) {
        formatter = formatter.withZone(tz);
    }
    // format value
    String formatted;
    if (value instanceof ReadableInstant) {
        formatted = formatter.print((ReadableInstant) value);
    } else if (value instanceof ReadablePartial) {
        formatted = formatter.print((ReadablePartial) value);
    } else {
        throw new JspException(
                "value attribute of format tag must be a ReadableInstant or ReadablePartial," +
                " was: " + value.getClass().getName());
    }
    // Either export the result into the requested scope or print it inline.
    if (var != null) {
        pageContext.setAttribute(var, formatted, scope);
    } else {
        try {
            pageContext.getOut().print(formatted);
        } catch (IOException ioe) {
            throw new JspTagException(ioe.toString(), ioe);
        }
    }
    return EVAL_PAGE;
} | /*
Formats the given instant or partial. |
/**
 * Rethrows {@code t} unchanged when it is already unchecked (AssertionError
 * or RuntimeException); otherwise wraps it in an instance of the given
 * RuntimeException subtype created via its {@code (Throwable)} constructor.
 * When that constructor is missing or fails, falls back to a plain
 * {@code RuntimeException} wrapping {@code t}.
 */
public static void asUnchecked(Throwable t, Class<? extends RuntimeException> checkedExceptionWrapper) {
    if (t instanceof AssertionError) {
        throw (AssertionError) t;
    }
    if (t instanceof RuntimeException) {
        throw (RuntimeException) t;
    }
    RuntimeException wrapped;
    try {
        wrapped = checkedExceptionWrapper.getConstructor(Throwable.class).newInstance(t);
    } catch (Exception reflectionFailure) {
        // Wrapper type not instantiable reflectively — fall back to a generic wrapper.
        throw new RuntimeException(t);
    }
    throw wrapped;
} | Checks whether given throwable is unchecked and if not, it will be wrapped as unchecked exception of given type |
/**
 * Unwraps a chain of exceptions linked via {@link Throwable#getCause()}
 * (and {@code InvocationTargetException.getTargetException()}) and returns
 * the deepest throwable that has no further cause.
 *
 * @param e the throwable to unwrap
 * @return the original (root) cause, or {@code e} itself when it has none
 */
public static Throwable getOriginalCause(Throwable e) {
    if (e instanceof InvocationTargetException) {
        return getOriginalCause(((InvocationTargetException) e).getTargetException());
    }
    // getCause() already returns a Throwable (or null), so the previous
    // "instanceof Throwable" test was just an obfuscated null check —
    // make the intent explicit.
    if (e.getCause() != null) {
        return getOriginalCause(e.getCause());
    }
    return e;
} | Retrieves original cause from a stack of exceptions bound together with {@link Throwable#getCause()} references. |
public Redis format(RedisFormat format) {
String strFormat;
switch (format) {
case JSON_INTERACTION_META:
strFormat = "json_interaction_meta";
break;
default:
case JSON_INTERACTION:
strFormat = "json_interaction";
break;
}
return setParam("format", strFormat);
} | /*
The output format for your data:
json_interaction_meta - This is specific to the Redis connector for now. Each interaction is sent separately
except it is framed with metadata.
json_interaction - This is specific to the Redis connector for now. Each interaction is sent separately with
no metadata.
If you omit this parameter or set it to json_interaction_meta, each interaction will be delivered with
accompanying metadata. Both the interactions and the metadata are delivered as JSON objects.
<p/>
Take a look at our Sample Output for File-Based Connectors page.
<p/>
If you select json_interaction, DataSift omits the metadata and sends each interaction as a single JSON object.
@return this |
/**
 * Observes {@code ExecuteWarp} and, for the duration of the event:
 * activates the warp execution context, publishes the event's WarpContext
 * (both via the instance holder and the static WarpContextStore) and its
 * synchronization point, then proceeds with the event.
 */
public void provideWarpContext(@Observes EventContext<ExecuteWarp> eventContext) {
    warpExecutionContext.get().activate();
    try {
        WarpContext context = eventContext.getEvent().getWarpContext();
        warpContext.set(context);
        WarpContextStore.set(context);
        synchronization.set(eventContext.getEvent().getWarpContext().getSynchronization());
        eventContext.proceed();
    } finally {
        // NOTE(review): only WarpContextStore is reset here; the warpContext
        // and synchronization instance holders stay populated after the event
        // — confirm that is intentional.
        WarpContextStore.reset();
        warpExecutionContext.get().deactivate();
    }
} | Activates/deactivates {@link WarpExecutionContext}.
<p>
Provides {@link WarpContext} instance.
<p>
Provides {@link SynchronizationPoint} instance.
<p>
Setups/resets {@link WarpContextStore} |
/**
 * Selects the SplunkStorm output format parameter; any value other than
 * JSON_NEW_LINE_TIMESTAMP maps to "json_new_line_meta".
 */
public SplunkStorm format(SplunkStormFormat format) {
    // Like the original switch, a null format intentionally raises an NPE here.
    String formatName = "json_new_line_meta";
    switch (format) {
        case JSON_NEW_LINE_TIMESTAMP:
            formatName = "json_new_line";
            break;
        default:
            break;
    }
    return setParam("format", formatName);
} | /*
The output format for your data:
<p/>
json_meta - The current default format, where each payload contains a full JSON document. It contains metadata
and an "interactions" property that has an array of interactions.
<p/>
json_array - The payload is a full JSON document, but just has an array of interactions.
<p/>
json_new_line - The payload is NOT a full JSON document. Each interaction is flattened and separated by a line
break.
If you omit this parameter or set it to json_meta, your output consists of JSON metadata followed by a JSON
array of interactions (wrapped in square brackets and separated by commas).
Take a look at our Sample Output for File-Based Connectors page.
If you select json_array, DataSift omits the metadata and sends just the array of interactions.
If you select json_new_line, DataSift omits the metadata and sends each interaction as a single JSON object.
@return this |
/**
 * Adds a tag resource (e.g. "cat") to this Instagram source; the numeric
 * location arguments are not applicable for tags and are passed as -1.
 *
 * @param tag the tag to track
 * @param exactMatch true to require an exact tag match
 * @return this
 */
public Instagram byTag(String tag, boolean exactMatch) {
    addResource(Type.TAG, tag, -1, -1, -1, exactMatch, null);
    return this;
} | /*
Adds a resource by tag
@param tag the tag, e.g cat
@param exactMatch true when a tag must be an exact match or false when the tag should match the Instagram tag
search behaviour
@return this |
/**
 * Adds a resource parameter set of the given type to the request. Only the
 * TAG type is handled by the visible branch; the location arguments
 * (longitude, lattitude, distance, fourSquareLocation) are unused there.
 *
 * @param type the resource type; determines which other arguments apply
 * @return this
 * @throws IllegalArgumentException when a TAG resource has no value
 */
protected Instagram addResource(Type type, String value, float longitude, float lattitude, int distance,
        boolean exactMatch, String fourSquareLocation) {
    ResourceParams parameterSet = newResourceParams();
    switch (type) {
        case TAG:
            if (value == null) {
                // BUG FIX: the message previously referred to "user" although
                // this branch handles the tag type.
                throw new IllegalArgumentException("If type is tag then value is required");
            }
            parameterSet.set("type", "tag");
            parameterSet.set("value", value);
            // NOTE(review): "extact_match" looks like a typo for "exact_match",
            // but it may mirror a misspelled key expected by the remote API —
            // confirm against the API contract before renaming.
            parameterSet.set("extact_match", exactMatch);
            break;
    }
    return this;
} | /*
Adds a resource object to the request of the given type, which is always required
@param type the type of resource, all other params are optional dependent upon what this value is
@return this |
/**
 * Looks up the named message in the resource bundle and formats it with
 * the given arguments.
 *
 * @throws MissingResourceException when no message exists under {@code name}
 */
public static String getMessage(String name, Object[] a)
        throws MissingResourceException {
    return MessageFormat.format(rb.getString(name), a);
} | Retrieves a message with arbitrarily many arguments. |
/**
 * Convenience overload: looks up and formats a message with exactly four
 * arguments.
 */
public static String getMessage(String name,
        Object a1,
        Object a2,
        Object a3,
        Object a4)
        throws MissingResourceException {
    final Object[] args = { a1, a2, a3, a4 };
    return getMessage(name, args);
} | Retrieves a message with four arguments. |
public JsonNode get(String str) {
String[] parts = str.split("\\.");
JsonNode retval = data.get(parts[0]);
for (int i = 1; i <= parts.length - 1; i++) {
if (retval == null) {
return null;
} else {
retval = retval.get(parts[i]);
}
}
return retval;
} | /*
Fetches values using the popular dot notation
i.e. tumblr.author.id would return the id (12345) value in a structure similar to
<pre>
{"tumblr" : {
"author" : {
"id" : 12345
}
}
}
</pre>
@param str a JSON dot notation string
@return null if a value doesn't exist for that key or the value |
/**
 * Collects the class itself, every superclass, and every interface in the
 * whole hierarchy (interfaces are expanded recursively). Duplicates are
 * removed by the set; array order is unspecified.
 */
static Class<?>[] getAncestors(Class<?> clazz) {
    Set<Class<?>> ancestors = new HashSet<Class<?>>();
    for (Class<?> current = clazz; current != null; current = current.getSuperclass()) {
        ancestors.add(current);
        // Recursing per interface also pulls in each interface's own ancestors.
        for (Class<?> iface : current.getInterfaces()) {
            ancestors.addAll(Arrays.asList(getAncestors(iface)));
        }
    }
    return ancestors.toArray(new Class<?>[ancestors.size()]);
} | Get all subclasses and interfaces in whole class hierarchy |
/**
 * Registers {@code filter} with the underlying HTTP filter chain, wrapped
 * in a {@code NotHttpRequestFilter} — presumably inverting the match;
 * NOTE(review): confirm NotHttpRequestFilter's semantics.
 */
@Override
public HttpFilterBuilder addFilter(RequestFilter<HttpRequest> filter) {
    return httpFilterChainBuilder.addFilter(new NotHttpRequestFilter(filter));
} | {@inheritDoc} |
@POST
@Path( "/{id}" )
@Produces( "application/json" )
@Consumes( "application/json" )
public GreetingResponse greet( @Context HttpServletRequest httpRequest, @PathParam( "id" ) String id, @QueryParam( "opt" ) String
        opt, GreetingRequest request ) {
    // Delegate to the base greet() and then personalize the greeting with the path id.
    GreetingResponse greetingResponse = greet( httpRequest, request );
    greetingResponse.setGreeting( "Hello #" + id + ", " + request.getName() + "!" );
    return greetingResponse;
} | } |
/**
 * Validates the given CSDL string via the pylon/validate endpoint.
 *
 * @param csdl the CSDL to validate
 * @return a future resolving to the validation result
 */
public FutureData<PylonValidation> validate(String csdl) {
    URI endpoint = newParams().forURL(config.newAPIEndpointURI(VALIDATE));
    FutureData<PylonValidation> futureResult = new FutureData<PylonValidation>();
    JSONRequest validationRequest = config.http()
            .postJSON(endpoint, new PageReader(newRequestCallback(futureResult, new PylonValidation(), config)))
            .addField("csdl", csdl);
    performRequest(futureResult, validationRequest);
    return futureResult;
} | /*
Validate the given CSDL string. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonvalidate
@param csdl the CSDL to validate
@return the results of the validation, use {@link com.datasift.client.core.Validation#isValid()} to check if
validation was successful |
/**
 * Compiles a CSDL string into a stream hash via the pylon/compile endpoint.
 *
 * @param csdl the CSDL to compile
 * @return a future resolving to the compiled stream
 */
public FutureData<PylonStream> compile(String csdl) {
    URI endpoint = newParams().forURL(config.newAPIEndpointURI(COMPILE));
    FutureData<PylonStream> futureResult = new FutureData<PylonStream>();
    JSONRequest compileRequest = config.http()
            .postJSON(endpoint, new PageReader(newRequestCallback(futureResult, new PylonStream(), config)))
            .addField("csdl", csdl);
    performRequest(futureResult, compileRequest);
    return futureResult;
} | /*
Compile a CSDL string to a stream hash to which you can later subscribe and receive interactions from.
For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pyloncompile
@param csdl the CSDL to compile
@return a stream object representing the DataSift compiled CSDL, use {@link com.datasift.client.core
.Stream#hash()}
to list the hash for the compiled CSDL |
/**
 * Starts a recording for the given stream hash via the pylon/start endpoint.
 *
 * @param stream the compiled stream whose hash to record; must have a hash
 * @param name a human-friendly name for the recording
 * @return a future resolving to the new recording id
 * @throws IllegalArgumentException when the stream or its hash is missing
 */
public FutureData<PylonRecordingId> start(PylonStream stream, String name) {
    if (stream == null || stream.hash.isEmpty()) {
        throw new IllegalArgumentException("A valid hash is required to start a stream");
    }
    URI endpoint = newParams().forURL(config.newAPIEndpointURI(START));
    FutureData<PylonRecordingId> futureResult = new FutureData<>();
    JSONRequest startRequest = config.http()
            .putJSON(endpoint, new PageReader(newRequestCallback(futureResult, new PylonRecordingId(), config)))
            .addField("hash", stream.hash)
            .addField("name", name);
    performRequest(futureResult, startRequest);
    return futureResult;
} | /*
Start a recording with the given hash & name. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonstart
@param stream the stream hash
@param name a name for the subscription
@return a Datasift pylon recording id. See {@link com.datasift.client.pylon.PylonRecording.PylonRecordingId} |
/**
 * Stops the recording with the given id via the pylon/stop endpoint.
 *
 * @param recordingId the recording to stop; must carry a non-empty id
 * @return a future resolving to a success/failure result
 * @throws IllegalArgumentException when the recording id is missing
 */
public FutureData<DataSiftResult> stop(PylonRecordingId recordingId) {
    if (recordingId == null || recordingId.getId() == null || recordingId.getId().isEmpty()) {
        throw new IllegalArgumentException("A valid recording id is required to stop a recording");
    }
    URI endpoint = newParams().forURL(config.newAPIEndpointURI(STOP));
    FutureData<DataSiftResult> futureResult = new FutureData<>();
    JSONRequest stopRequest = config.http()
            .putJSON(endpoint, new PageReader(newRequestCallback(futureResult, new BaseDataSiftResult(), config)))
            .addField("id", recordingId.getId());
    performRequest(futureResult, stopRequest);
    return futureResult;
} | /*
Stop the stream with the given hash. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonstop
@param recordingId the id for the recording to stop.
See {@link com.datasift.client.pylon.PylonRecording.PylonRecordingId}
@return a result which can be checked for success or failure, A status 204 indicates success,
or using {@link com.datasift.client.BaseDataSiftResult#isSuccessful()} |
/**
 * Fetches the status of all recordings, paginated. Non-positive page or
 * per-page values are simply omitted from the request.
 *
 * @param page 1-based page number; ignored when {@code <= 0}
 * @param perPage results per page; ignored when {@code <= 0}
 * @return a future resolving to the recording list
 */
public FutureData<PylonRecordingList> get(int page, int perPage) {
    ParamBuilder params = new ParamBuilder();
    if (page > 0) {
        params.put("page", page);
    }
    if (perPage > 0) {
        params.put("per_page", perPage);
    }
    FutureData<PylonRecordingList> futureResult = new FutureData<>();
    Request listRequest = config.http().GET(params.forURL(config.newAPIEndpointURI(GET)),
            new PageReader(newRequestCallback(futureResult, new PylonRecordingList(), config)));
    performRequest(futureResult, listRequest);
    return futureResult;
} | /*
Get the status of all recordings on page given. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonget
@return the status of all recordings that are running or have run with stored data |
/**
 * Fetches the status of a single recording by id via the pylon/get endpoint.
 *
 * @param recordingId the recording to look up
 * @return a future resolving to the recording status
 */
public FutureData<PylonRecording> get(PylonRecordingId recordingId) {
    URI endpoint = newParams().put("id", recordingId.id).forURL(config.newAPIEndpointURI(GET));
    FutureData<PylonRecording> futureResult = new FutureData<>();
    Request statusRequest = config.http().GET(endpoint,
            new PageReader(newRequestCallback(futureResult, new PylonRecording(), config)));
    performRequest(futureResult, statusRequest);
    return futureResult;
} | /*
Get the status of the recording with a given id. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonget
@param recordingId id for the required recording.
See {@link com.datasift.client.pylon.PylonRecording.PylonRecordingId}
@return the status of the requested recording |
/**
 * Runs an analysis query against a recording via the pylon/analyze endpoint.
 *
 * @param query the analyze request body; must not be null
 * @return a future resolving to the analysis result
 * @throws IllegalArgumentException when the query is null or not serializable to JSON
 */
public FutureData<PylonResult> analyze(PylonQuery query) {
    if (query == null) {
        throw new IllegalArgumentException("A valid analyze request body is required to analyze a stream");
    }
    FutureData<PylonResult> future = new FutureData<PylonResult>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(ANALYZE));
    try {
        JSONRequest result = config.http()
                .postJSON(uri, new PageReader(newRequestCallback(future, new PylonResult(), config)))
                .setData(query);
        performRequest(future, result);
    } catch (JsonProcessingException ex) {
        // FIX: preserve the serialization failure as the cause instead of discarding it.
        throw new IllegalArgumentException("Valid JSON is required to analyze a stream", ex);
    }
    return future;
} | /*
Analyze a given recording and retrieve results. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonanalyze
@param query pylon options for a recording
@return information on execution of a recording |
/**
 * Retrieves the VEDO tags of a recording via the pylon/tags endpoint.
 *
 * @param recordingId the recording whose tags to fetch
 * @return a future resolving to the tags
 */
public FutureData<PylonTags> tags(PylonRecordingId recordingId) {
    URI endpoint = newParams().put("id", recordingId.id).forURL(config.newAPIEndpointURI(TAGS));
    FutureData<PylonTags> futureResult = new FutureData<>();
    Request tagsRequest = config.http().GET(endpoint,
            new PageReader(newRequestCallback(futureResult, new PylonTags(), config)));
    performRequest(futureResult, tagsRequest);
    return futureResult;
} | /*
Retrieve VEDO tags for a given recording. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylontags
@param recordingId A recording id. See {@link com.datasift.client.pylon.PylonRecording.PylonRecordingId}
@return vedo tags for the given filter |
/**
 * Samples a Pylon recording via the pylon/sample endpoint.
 *
 * @param sampleRequest the sample parameters; must be non-null and carry a recordingId
 * @return a future resolving to the sample result
 * @throws IllegalArgumentException when the request is invalid or not serializable to JSON
 */
public FutureData<PylonSample> sample(PylonSampleRequest sampleRequest) {
    if (sampleRequest == null || sampleRequest.recordingId == null) {
        throw new IllegalArgumentException(
            "A valid sample request object containing a recordingId is required to carry out a Pylon sample"
        );
    }
    FutureData<PylonSample> future = new FutureData<>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(SAMPLE));
    try {
        JSONRequest result = config.http()
                .postJSON(uri, new PageReader(newRequestCallback(future, new PylonSample(), config)))
                .setData(sampleRequest);
        performRequest(future, result);
    } catch (JsonProcessingException ex) {
        // BUG FIX: message previously said "analyze a stream" (copy-paste from
        // analyze()); corrected to refer to sampling, and the cause is preserved.
        throw new IllegalArgumentException("Valid JSON is required to sample a Pylon recording", ex);
    }
    return future;
} | /*
Sample a Pylon recording. For information on this endpoint see documentation page:
http://dev.datasift.com/pylon/docs/api/pylon-api-endpoints/pylonsample
@param sampleRequest Request object containing parameters for Sample
@return PylonSample object containing results of sampling |
/**
 * Adds an OAuth access token to this managed source's auth parameters.
 * NOTE(review): the method name "addOAutToken" looks like a typo for
 * "addOAuthToken"; renaming would break callers, so it is left as-is.
 * The error message mentions a refresh token although only the access
 * token is validated here — confirm the intended wording.
 *
 * @param oAuthAccessToken an OAuth2 access token; must be non-null and non-empty
 * @param expires expiry of the token as a UTC timestamp
 * @param name a human-friendly name for this auth token
 * @return this
 */
public Yammer addOAutToken(String oAuthAccessToken, long expires, String name) {
    if (oAuthAccessToken == null || oAuthAccessToken.isEmpty()) {
        throw new IllegalArgumentException("A valid OAuth and refresh token is required");
    }
    AuthParams parameterSet = newAuthParams(name, expires);
    parameterSet.set("value", oAuthAccessToken);
    return this;
} | /*
Adds an OAuth token to the managed source
@param oAuthAccessToken an oauth2 token
@param name a human friendly name for this auth token
@param expires identity resource expiry date/time as a UTC timestamp, i.e. when the token expires
@return this |
/**
 * Lazily allocates the buffer on first use (resetting both positions), or
 * grows an existing buffer by DEFAULT_BUFFER_RESIZE_FACTOR while keeping
 * its contents.
 */
private void resizeBuffer() {
    if (buffer != null) {
        final byte[] enlarged = new byte[buffer.length * DEFAULT_BUFFER_RESIZE_FACTOR];
        System.arraycopy(buffer, 0, enlarged, 0, buffer.length);
        buffer = enlarged;
    } else {
        buffer = new byte[getDefaultBufferSize()];
        pos = 0;
        readPos = 0;
    }
} | Increases our buffer by the {@link #DEFAULT_BUFFER_RESIZE_FACTOR}. |
/**
 * Generates the public final builder class for the given REST service:
 * a private constructor, the mapper getters, and one builder method per
 * service method.
 */
private TypeSpec generateBuilder( RestService restService ) {
    TypeSpec.Builder builder = TypeSpec.classBuilder( restService.getBuilderSimpleClassName() )
            .addModifiers( Modifier.PUBLIC, Modifier.FINAL )
            .addJavadoc( "Generated REST service builder for {@link $L}.\n", restService.getTypeElement().getQualifiedName() )
            .addMethod( MethodSpec.constructorBuilder().addModifiers( Modifier.PRIVATE ).build() );
    Map<TypeMirror, MethodSpec> getterByMapper = buildMappers( builder, restService );
    for ( RestServiceMethod serviceMethod : restService.getMethods() ) {
        buildMethod( builder, getterByMapper, serviceMethod );
    }
    return builder.build();
} | Generate the rest service builder
@param restService The rest service |
/**
 * Emits a NOTE-level compiler diagnostic; {@code msg} is a String.format
 * template filled with {@code args}, attached to element {@code e} (may be null).
 */
public void note( Element e, String msg, Object... args ) {
    messager.printMessage( Diagnostic.Kind.NOTE, String.format( msg, args ), e );
} | Prints a note message
@param e The element which has caused the error. Can be null
@param msg The error message
@param args if the error message contains %s, %d etc. placeholders this arguments will be used
to replace them |
/**
 * Emits a WARNING-level compiler diagnostic; {@code msg} is a String.format
 * template filled with {@code args}, attached to element {@code e} (may be null).
 */
public void warn( Element e, String msg, Object... args ) {
    messager.printMessage( Diagnostic.Kind.WARNING, String.format( msg, args ), e );
} | Prints a warning message
@param e The element which has caused the error. Can be null
@param msg The error message
@param args if the error message contains %s, %d etc. placeholders this arguments will be used
to replace them |
public void error( Element e, String msg, Object... args ) {
    // Format eagerly, then report at ERROR severity against the offending element.
    String formatted = String.format( msg, args );
    messager.printMessage( Diagnostic.Kind.ERROR, formatted, e );
} | Prints an error message
@param e The element which has caused the error. Can be null
@param msg The error message
@param args if the error message contains %s, %d etc. placeholders this arguments will be used
to replace them |
/**
 * Starts a daemon thread which periodically checks for dead connections and forces
 * them to re-connect — IF AND ONLY IF such a monitor thread isn't already running.
 *
 * Fixes: the InterruptedException was previously swallowed without restoring the
 * interrupt flag, and the catch block created a throwaway logger instead of reusing
 * the monitor's own.
 */
public static void detectDeadConnections() {
    // Only one monitor thread may run at a time.
    if (CONNECTION_DETECTOR_RUNNING) {
        return;
    }
    final Logger log = LoggerFactory.getLogger(ConnectionManager.class);
    Thread deadConnThread = new Thread(new Runnable() {
        public void run() {
            CONNECTION_DETECTOR_RUNNING = true;
            try {
                log.debug("Starting dead connection detection thread");
                while (detectDeadConnection) {
                    log.debug("Checking if there are any dead connections");
                    long now = DateTime.now().getMillis();
                    for (DataSiftConnection data : connections) {
                        // A connection counts as dead when nothing has been seen from it
                        // within CONNECTION_TIMEOUT_LIMIT seconds.
                        if (data.lastSeen() != null) {
                            if (now - data.lastSeen().getMillis() >=
                                    TimeUnit.SECONDS.toMillis(CONNECTION_TIMEOUT_LIMIT)) {
                                log.info("Dead connection found, triggering re-connection");
                                data.closeAndReconnect();
                            }
                        }
                    }
                    log.debug(String.format("Checked %s connections", connections.size()));
                    try {
                        Thread.sleep(TimeUnit.SECONDS.toMillis(CONNECTION_TIMEOUT));
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag so callers can observe it; the loop
                        // condition decides whether monitoring continues.
                        Thread.currentThread().interrupt();
                        log.info("Interrupted while waiting to check conn");
                    }
                }
            } finally {
                // Always clear the flag so a future call can restart the monitor.
                CONNECTION_DETECTOR_RUNNING = false;
            }
        }
    }, "dead-connections-monitor");
    // Daemon so this monitor never prevents JVM shutdown.
    deadConnThread.setDaemon(true);
    deadConnThread.start();
} | /*
Starts a thread which checks periodically for dead connections and force them to re-connect.
IF AND ONLY IF a thread isn't already running to do this |
/**
 * Subscribes a single callback for exceptions that may occur during streaming.
 * The listener is remembered for later use and pushed to every existing connection.
 *
 * @param listener the error callback
 * @return this, for chaining
 */
public ConnectionManager onError(ErrorListener listener) {
    this.errorListener = listener;
    for (DataSiftConnection connection : connections) {
        connection.setErrorListener(listener);
    }
    return this;
} | /*
Subscribes a callback to listen for exceptions that may occur during streaming.
When exceptions occur it is unlikely we'll know which stream/subscription caused the exception
so instead of notifying all stream subscribers of the same exception this provides a way to list
the error just once
@param listener an error callback |
/**
 * Creates a new managed source.
 *
 * @param name   the name of the source
 * @param source the source and its configuration
 * @return a future resolving to the created managed source
 */
public <T extends DataSource> FutureData<ManagedSource> create(String name, T source) {
    // A null ManagedSource argument tells updateOrCreate to create rather than update.
    return updateOrCreate(name, source, null);
} | /*
Create a new managed source
@param name the name of the source
@param source the source and its configurations
@return this |
/**
 * Adds one or more authentication credentials to a given managed source.
 *
 * @param id        the ID of the source
 * @param validate  if true each token is validated
 * @param resources a set of tokens
 * @return a future resolving to the updated source
 */
public FutureData<ManagedSource> addAuth(String id, boolean validate, String... resources) {
    if (id == null || resources == null || resources.length == 0) {
        throw new IllegalArgumentException("ID and a resource is required");
    }
    FutureData<ManagedSource> future = new FutureData<>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(ADD_AUTH));
    try {
        // Wrap every raw token value in a resource-params object for JSON serialization.
        List<BaseSource.ResourceParams> tokens = new ArrayList<>();
        for (String resource : resources) {
            BaseSource.ResourceParams token = new ManagedSource.ResourceParams();
            token.set("value", resource);
            tokens.add(token);
        }
        POST request = config.http()
                .POST(uri, new PageReader(newRequestCallback(future, new ManagedSource(), config)))
                .form("id", id)
                .form("auth", DataSiftClient.MAPPER.writeValueAsString(tokens))
                .form("validate", DataSiftClient.MAPPER.writeValueAsString(Arrays.asList(validate)));
        performRequest(future, request);
    } catch (JsonProcessingException jpe) {
        // Serialization failed locally: surface the cause without issuing a request.
        future.interuptCause(jpe);
        future.doNotify();
    }
    return future;
} | /*
Add one or more authentication credentials to a given managed source
@param id the ID of the source
@param validate if true each token is validated
@param resources a set of tokens
@return the source |
/**
 * Removes a set of resources from a managed source.
 *
 * Fixes: on a JsonProcessingException the original still called
 * {@code performRequest(future, null)} with a null request; the request is now only
 * performed when serialization succeeded (matching addAuth). Also fixes the "oen"
 * typo in the validation message.
 *
 * @param id        the ID of the managed source
 * @param resources the resources to remove
 * @return a future resolving to the managed source
 * @throws IllegalArgumentException if id is null or no resources are given
 */
public FutureData<ManagedSource> removeResource(String id, String... resources) {
    if (id == null || resources == null || resources.length == 0) {
        throw new IllegalArgumentException("ID and one or more resources are required");
    }
    FutureData<ManagedSource> future = new FutureData<>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(REMOVE_RESOURCE));
    try {
        POST request = config.http()
                .POST(uri, new PageReader(newRequestCallback(future, new ManagedSource(), config)))
                .form("id", id)
                .form("resource_ids", DataSiftClient.MAPPER.writeValueAsString(resources));
        performRequest(future, request);
    } catch (JsonProcessingException e) {
        // Serialization failed locally: notify the future instead of sending anything.
        future.interuptCause(e);
        future.doNotify();
    }
    return future;
} | /*
Remove a set of resources from a managed source
@param id the ID of the managed source
@param resources the resources to remove
@return the managed source |
/**
 * Updates an existing managed source.
 *
 * @param name   the name of the source
 * @param source the source and its configuration
 * @param id     the existing managed source being updated
 * @return a future resolving to the updated source
 */
public <T extends DataSource> FutureData<ManagedSource> update(String name, T source, ManagedSource id) {
    // Delegates to the full overload with validation disabled.
    return update(name, source, id, false);
} | /*
Update an existing managed source
@param name the name of the source
@param source the source and its configuration
@return this |
/**
 * Starts a configured managed source.
 *
 * @param source the managed source to start (may still be resolving)
 * @return a future resolving to the API result
 * @throws IllegalArgumentException if source is null
 */
public FutureData<DataSiftResult> start(final FutureData<ManagedSource> source) {
    if (source == null) {
        throw new IllegalArgumentException("A data source is required");
    }
    final FutureData<DataSiftResult> future = new FutureData<>();
    // Wait for the managed source to materialize, then POST its id to the start endpoint.
    unwrapFuture(source, future, new BaseDataSiftResult(), new FutureResponse<ManagedSource>() {
        public void apply(ManagedSource data) {
            URI uri = newParams().forURL(config.newAPIEndpointURI(START));
            POST request = config.http().POST(uri, new PageReader(newRequestCallback(future, data, config)))
                    .form("id", data.getId());
            performRequest(future, request);
        }
    });
    return future;
} | /*
@param source the configured managed source to start
@return this |
/**
 * Gets managed sources, optionally filtered by type and paginated.
 *
 * @param type    optional source type filter (null for all types)
 * @param page    page number; values <= 0 use the API default
 * @param perPage page size; values <= 0 use the API default
 * @return a future resolving to the list of managed sources
 */
public FutureData<ManagedSourceList> get(ManagedDataSourceType type, int page, int perPage) {
    FutureData<ManagedSourceList> future = new FutureData<>();
    ParamBuilder params = newParams();
    // All three filters are optional; only usable values are sent.
    if (type != null) {
        params.put("source_type", type.value());
    }
    if (page > 0) {
        params.put("page", page);
    }
    if (perPage > 0) {
        params.put("per_page", perPage);
    }
    URI uri = params.forURL(config.newAPIEndpointURI(GET));
    Request request =
            config.http().GET(uri, new PageReader(newRequestCallback(future, new ManagedSourceList(), config)));
    performRequest(future, request);
    return future;
} | /*
Get managed sources for the given type |
/**
 * Fetches a single managed source by ID.
 *
 * Fixes: the original accepted a null/empty id and sent a request guaranteed to be
 * rejected by the API; it now fails fast, consistent with addAuth/removeResource.
 *
 * @param id the ID of the managed source to fetch
 * @return a future resolving to the managed source for the ID provided
 * @throws IllegalArgumentException if id is null or empty
 */
public FutureData<ManagedSource> get(String id) {
    if (id == null || id.isEmpty()) {
        throw new IllegalArgumentException("An ID is required");
    }
    FutureData<ManagedSource> future = new FutureData<>();
    URI uri = newParams().put("id", id).forURL(config.newAPIEndpointURI(GET));
    Request request = config.http().
            GET(uri, new PageReader(newRequestCallback(future, new ManagedSource(), config)));
    performRequest(future, request);
    return future;
} | /*
@param id the ID of the managed source to fetch
@return the managed source for the ID provided |
/**
 * Adds information about a single facebook page.
 *
 * @param id    the id of the facebook page
 * @param url   the facebook page's URL
 * @param title the page's title
 * @return this, for chaining
 */
public FacebookPage addPage(String id, String url, String title) {
    // Each call registers one page as a fresh resource-parameter set.
    ResourceParams page = newResourceParams();
    page.set("id", id);
    page.set("url", url);
    page.set("title", title);
    return this;
} | /*
Adds information about a single facebook page
@param id the id of the facebook page
@param url the facebook page's URL
@param title the page's title
@return this |
/**
 * Adds a facebook page to be crawled for instagram content.
 *
 * @param pageid the ID of the page, usually numerical
 * @return this, for chaining
 */
public FacebookPage addInstagramLinkedPage(String pageid) {
    // Registers the page with the "instagram" resource type.
    ResourceParams page = newResourceParams();
    page.set("id", pageid);
    page.set("type", "instagram");
    return this;
} | Add a facebook page to be crawled for instagram content
@param pageid the ID of the page, usually numerical
@return this |
/**
 * Adds an instagram user to be crawled for content.
 *
 * @param username ID of the user to be checked, usually a textual name
 * @return this, for chaining
 */
public FacebookPage addInstagramUser(String username) {
    // Registers the user with the "instagram_user" resource type.
    ResourceParams user = newResourceParams();
    user.set("id", username);
    user.set("type", "instagram_user");
    return this;
} | Add an instagram user to be crawled for content
@param username ID of the user to be checked, usually a textual name
@return this |
@Override
public WarpResult execute(Activity activity, WarpContext warpContext) {
    // Orchestrates one Warp execution: arm server-side inspection, run the client
    // activity, wait for the server to finish, then surface any activity failure
    // before returning the collected result.
    try {
        setupServerInspection();
        executeActivity(activity);
        awaitServerExecution(warpContext);
        checkActivityFailure();
        return warpContext.getResult();
    } finally {
        // Always release/reset inspection state, even when the activity failed.
        cleanup();
    }
} | /*
(non-Javadoc)
@see org.jboss.arquillian.warp.impl.client.execution.WarpExecutor#execute(org.jboss.arquillian.warp.Activity, org.jboss.arquillian.warp.impl.client.execution.WarpContext) |
/**
 * Selects the output format for delivered data.
 *
 * json_meta (default) — full JSON document with metadata plus an "interactions" array;
 * json_array — full JSON document containing only the array of interactions;
 * json_new_line — each interaction flattened and separated by a line break.
 *
 * @param format the desired output format
 * @return this, for chaining
 */
public Http format(HttpFormat format) {
    String value;
    switch (format) {
        case JSON_ARRAY:
            value = "json_array";
            break;
        case JSON_NEW_LINE:
            value = "json_new_line";
            break;
        case JSON_META:
        default:
            // json_meta is the documented default.
            value = "json_meta";
            break;
    }
    return setParam("format", value);
} | /*
The output format for your data:
<p/>
json_meta - The current default format, where each payload contains a full JSON document. It contains metadata
and an "interactions" property that has an array of interactions.
<p/>
json_array - The payload is a full JSON document, but just has an array of interactions.
<p/>
json_new_line - The payload is NOT a full JSON document. Each interaction is flattened and separated by a line
break.
If you omit this parameter or set it to json_meta, your output consists of JSON metadata followed by a JSON
array of interactions (wrapped in square brackets and separated by commas).
Take a look at our Sample Output for File-Based Connectors page.
If you select json_array, DataSift omits the metadata and sends just the array of interactions.
If you select json_new_line, DataSift omits the metadata and sends each interaction as a single JSON object.
@return this |
/**
 * Sets the authentication information that should be used for the connector.
 *
 * @param username the username
 * @param password the password
 * @return this, for chaining
 */
public Http auth(String username, String password) {
    // Apply both credentials via the individual fluent setters.
    return username(username).password(password);
} | /*
Sets the authentication information that should be used for the connector
@param username the username
@param password the password
@return this |
/**
 * Starts the given historics query.
 *
 * @param query the prepared historics query (may still be resolving)
 * @return a result which can be checked for success or failure
 * @throws IllegalArgumentException if query is null, or resolves with a missing id
 */
public FutureData<DataSiftResult> start(FutureData<PreparedHistoricsQuery> query) {
    if (query == null) {
        throw new IllegalArgumentException("A valid PreparedHistoricsQuery is required");
    }
    final FutureData<DataSiftResult> future = new FutureData<>();
    // Once the prepared query resolves, validate its id and kick off the start call.
    unwrapFuture(query, future, new BaseDataSiftResult(), new FutureResponse<PreparedHistoricsQuery>() {
        public void apply(PreparedHistoricsQuery data) {
            String queryId = data.getId();
            if (queryId == null || queryId.isEmpty()) {
                throw new IllegalArgumentException("A valid PreparedHistoricsQuery is required");
            }
            start(queryId, future);
        }
    });
    return future;
} | /*
Start the historics query given
@return a result which can be checked for success or failure, A status 204 indicates success,
or using {@link com.datasift.client.BaseDataSiftResult#isSuccessful()} |
/**
 * Checks the status of data availability in the archive for the given time period.
 *
 * @param start   the date from which the archive should be checked
 * @param end     the date up to which the archive should be checked
 * @param sources an optional list of data sources to query, e.g. [tumblr,facebook,...]
 * @return a report of the current availability of data for the given time period
 */
public FutureData<HistoricsStatus> status(DateTime start, DateTime end, String... sources) {
    FutureData<HistoricsStatus> future = new FutureData<>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(STATUS));
    // The API expects Unix timestamps in seconds, not milliseconds.
    POST request = config.http()
            .POST(uri, new PageReader(newRequestCallback(future, new HistoricsStatus(), config)))
            .form("start", MILLISECONDS.toSeconds(start.getMillis()))
            .form("end", MILLISECONDS.toSeconds(end.getMillis()));
    if (sources != null && sources.length > 0) {
        // Join the optional data sources into a single comma-separated value.
        StringBuilder joined = new StringBuilder();
        for (int i = 0; i < sources.length; i++) {
            if (i > 0) {
                joined.append(',');
            }
            joined.append(sources[i]);
        }
        request.form("sources", joined.toString());
    }
    performRequest(future, request);
    return future;
} | /*
Check the status of data availability in our archive for the given time period
@param start the dat from which the archive should be checked
@param end the up to which the archive should be checked
@param sources an optional list of data sources that should be queried, e.g. [tumblr,facebook,...]
@return a report of the current status/availability of data for the given time period |
/**
 * Retrieves a list of {@link HistoricsQuery} objects.
 *
 * @param max          max number of objects to list; values <= 0 use the API default
 * @param page         a page number; values <= 0 use the API default
 * @param withEstimate if true, include an estimated completion time
 * @return a future resolving to an iterable list of {@link HistoricsQuery}s
 */
public FutureData<HistoricsQueryList> list(int max, int page, boolean withEstimate) {
    FutureData<HistoricsQueryList> future = new FutureData<>();
    URI uri = newParams().forURL(config.newAPIEndpointURI(GET));
    // with_estimate is sent as a 1/0 flag.
    POST post = config.http()
            .POST(uri, new PageReader(newRequestCallback(future, new HistoricsQueryList(), config)))
            .form("with_estimate", withEstimate ? 1 : 0);
    if (max > 0) {
        post.form("max", max);
    }
    if (page > 0) {
        post.form("page", page);
    }
    performRequest(future, post);
    return future;
} | /*
Retrieve a list of {@link HistoricsQuery} objects
@param max max number of objects to list
@param page a page number
@param withEstimate if true, include an estimated completion time
@return an iterable list of {@link HistoricsQuery}s |
public FutureData<PreparedHistoricsQuery> prepare(String hash, DateTime start, DateTime end, String name
, String... sources) {
return prepare(hash, MILLISECONDS.toSeconds(start.getMillis()), MILLISECONDS.toSeconds(end.getMillis()),
name, -1, sources);
} | /*
@param hash The hash of the CSDL for your historics query.
Example values: 2459b03a13577579bca76471778a5c3d
@param start Unix timestamp for the start time.
Example values: 1325548800
@param end Unix timestamp for the end time. Must be at least 24 hours in the past.
Example values: 1325548800
@param name The name you assign to your historics query.
Example values: Football
@return the prepared Historics |
/**
 * Prepares a historics query from Unix timestamps (seconds).
 *
 * @param hash    the hash of the CSDL for your historics query
 * @param start   Unix timestamp for the start time
 * @param end     Unix timestamp for the end time
 * @param name    the name you assign to your historics query
 * @param sources optional list of data sources to include, e.g. tumblr
 * @return a future resolving to the prepared historics query
 */
public FutureData<PreparedHistoricsQuery> prepare(String hash, long start, long end, String name,
        String... sources) {
    // Defer to the full overload, passing -1 for its extra numeric parameter.
    return prepare(hash, start, end, name, -1, sources);
} | /*
@param hash The hash of the CSDL for your historics query.
Example values: 2459b03a13577579bca76471778a5c3d
@param start Unix timestamp for the start time.
Example values: 1325548800
@param end Unix timestamp for the end time. Must be at least 24 hours in the past.
Example values: 1325548800
@param name The name you assign to your historics query.
Example values: Football
@param sources Comma-separated list of data sources to include.
Example values: tumblr
@return the prepared historics |
/**
 * Executes the request to the remote url, optionally POSTing a serialized request
 * object, and deserializes the response into {@code returnType}.
 *
 * Fixes: the two consecutive {@code if (requestObject != null)} blocks were merged —
 * the second duplicated the null-check immediately after the first.
 *
 * Returns null when the connection is refused, the server answers 204 (no content),
 * or the server answers 404.
 *
 * @param url           the endpoint to call
 * @param returnType    expected type of the deserialized response
 * @param requestObject object to serialize and POST; null means a plain GET
 * @throws IllegalStateException on unexpected response codes or response types
 */
private <T> T execute(String url, Class<T> returnType, Object requestObject) throws Exception {
    URLConnection connection = new URL(url).openConnection();
    if (!(connection instanceof HttpURLConnection)) {
        throw new IllegalStateException("Not an http connection! " + connection);
    }
    HttpURLConnection httpConnection = (HttpURLConnection) connection;
    httpConnection.setUseCaches(false);
    httpConnection.setDefaultUseCaches(false);
    httpConnection.setDoInput(true);
    /*
     * With followRedirects enabled, simple URL redirects work as expected. But with port redirects (http->https)
     * followRedirects doesn't work and a HTTP 302 code is returned instead (ARQ-1365).
     *
     * In order to handle all redirects in one place, followRedirects is set to false and all HTTP 302 response codes are
     * treated accordingly within the execute method.
     */
    httpConnection.setInstanceFollowRedirects(false);
    try {
        if (requestObject != null) {
            // A request body implies a POST of the serialized object.
            httpConnection.setRequestMethod("POST");
            httpConnection.setDoOutput(true);
            httpConnection.setRequestProperty("Content-Type", "application/octet-stream");
            ObjectOutputStream ous = new ObjectOutputStream(httpConnection.getOutputStream());
            try {
                ous.writeObject(requestObject);
            } catch (Exception e) {
                throw new RuntimeException("Error sending request Object, " + requestObject, e);
            } finally {
                ous.flush();
                ous.close();
            }
        }
        try {
            httpConnection.getResponseCode();
        } catch (ConnectException e) {
            return null; // Could not connect
        }
        if (httpConnection.getResponseCode() == HttpURLConnection.HTTP_OK) {
            ObjectInputStream ois = new ObjectInputStream(httpConnection.getInputStream());
            Object o;
            try {
                o = ois.readObject();
            } finally {
                ois.close();
            }
            if (!returnType.isInstance(o)) {
                throw new IllegalStateException(
                        "Error reading results, expected a " + returnType.getName() + " but got "
                                + o);
            }
            return returnType.cast(o);
        } else if (httpConnection.getResponseCode() == HttpURLConnection.HTTP_NO_CONTENT) {
            return null;
        } else if (httpConnection.getResponseCode() == HttpURLConnection.HTTP_MOVED_TEMP) {
            // Follow the redirect manually (see comment above); recursion handles chains.
            String redirectUrl = httpConnection.getHeaderField("Location");
            return execute(redirectUrl, returnType, requestObject);
        } else if (httpConnection.getResponseCode() != HttpURLConnection.HTTP_NOT_FOUND) {
            throw new IllegalStateException("Error launching test at " + url + ". " + "Got "
                    + httpConnection.getResponseCode() + " (" + httpConnection.getResponseMessage() + ")");
        }
    } finally {
        httpConnection.disconnect();
    }
    return null;
} | Executes the request to the remote url
/**
 * Searches the known grammars for one equal to the given grammar.
 *
 * @param gr the grammar to look up
 * @return the equal grammar if found (possibly the identical instance), else null
 */
private Grammar getEqualGrammar(Grammar gr) {
    for (Grammar candidate : this.grammars) {
        // Identity match: the very same grammar instance.
        if (gr == candidate) {
            return candidate;
        }
        // Structural match: same grammar type, same event count, and a deep
        // equality check (with cycle tracking via the handled list).
        if (isSameGrammarType(gr, candidate)
                && gr.getNumberOfEvents() == candidate.getNumberOfEvents()) {
            List<Grammar> handled = new ArrayList<Grammar>();
            if (isEqualGrammar(gr, candidate, handled)) {
                return candidate;
            }
        }
    }
    return null;
} | != null ... found equal grammar
// Shared initialization of DSP state for the given stream Info ("the init is here
// because some of it is shared").
// NOTE(review): the 'encp' flag is never read in this body — confirm whether
// encode-specific setup was intended here. Always returns 0.
final int init(Info vi, boolean encp) {
    this.vi = vi;
    // Bits needed to encode a mode index.
    modebits = Util.ilog2(vi.modes);
    transform[0] = new Object[VI_TRANSFORMB];
    transform[1] = new Object[VI_TRANSFORMB];
    // MDCT is tranform 0
    transform[0][0] = new Mdct();
    transform[1][0] = new Mdct();
    // One MDCT per block size (short/long).
    ((Mdct) transform[0][0]).init(vi.blocksizes[0]);
    ((Mdct) transform[1][0]).init(vi.blocksizes[1]);
    // Short-block window tables are shared across all lap shapes; the long-block
    // tables get one entry per left/right lap combination.
    window[0][0][0] = new float[VI_WINDOWB][];
    window[0][0][1] = window[0][0][0];
    window[0][1][0] = window[0][0][0];
    window[0][1][1] = window[0][0][0];
    window[1][0][0] = new float[VI_WINDOWB][];
    window[1][0][1] = new float[VI_WINDOWB][];
    window[1][1][0] = new float[VI_WINDOWB][];
    window[1][1][1] = new float[VI_WINDOWB][];
    for (int i = 0; i < VI_WINDOWB; i++) {
        window[0][0][0][i] = window(i, vi.blocksizes[0], vi.blocksizes[0] / 2,
                vi.blocksizes[0] / 2);
        window[1][0][0][i] = window(i, vi.blocksizes[1], vi.blocksizes[0] / 2,
                vi.blocksizes[0] / 2);
        window[1][0][1][i] = window(i, vi.blocksizes[1], vi.blocksizes[0] / 2,
                vi.blocksizes[1] / 2);
        window[1][1][0][i] = window(i, vi.blocksizes[1], vi.blocksizes[1] / 2,
                vi.blocksizes[0] / 2);
        window[1][1][1][i] = window(i, vi.blocksizes[1], vi.blocksizes[1] / 2,
                vi.blocksizes[1] / 2);
    }
    // Decode-side codebooks, one per book declared in the stream header.
    fullbooks = new CodeBook[vi.books];
    for (int i = 0; i < vi.books; i++) {
        fullbooks[i] = new CodeBook();
        fullbooks[i].init_decode(vi.book_param[i]);
    }
    // initialize the storage vectors to a decent size greater than the
    // minimum
    pcm_storage = 8192; // we'll assume later that we have
    // a minimum of twice the blocksize of
    // accumulated samples in analysis
    pcm = new float[vi.channels][];
    {
        for (int i = 0; i < vi.channels; i++) {
            pcm[i] = new float[pcm_storage];
        }
    }
    // all 1 (large block) or 0 (small block)
    // explicitly set for the sake of clarity
    lW = 0; // previous window size
    W = 0; // current window size
    // all vector indexes; multiples of samples_per_envelope_step
    centerW = vi.blocksizes[1] / 2;
    pcm_current = centerW;
    // initialize all the mapping/backend lookups
    mode = new Object[vi.modes];
    for (int i = 0; i < vi.modes; i++) {
        int mapnum = vi.mode_param[i].mapping;
        int maptype = vi.map_type[mapnum];
        mode[i] = FuncMapping.mapping_P[maptype].look(this, vi.mode_param[i],
                vi.map_param[mapnum]);
    }
    return (0);
} | The init is here because some of it is shared
/**
 * Gets an AudioFileFormat for a File by opening it and delegating to
 * getAudioFileFormat(InputStream, long), passing the file length so subclasses
 * can fill in byte-length metadata.
 */
@Override
public AudioFileFormat getAudioFileFormat(File file)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(File): begin (class: {0})", getClass().getSimpleName());
    long fileLength = file.length();
    AudioFileFormat format;
    // try-with-resources guarantees the file handle is released after header parsing.
    try (InputStream stream = new FileInputStream(file)) {
        format = getAudioFileFormat(stream, fileLength);
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(File): end");
    return format;
} | Get an AudioFileFormat object for a File. This method calls
getAudioFileFormat(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long).
@param file the file to read from.
@return an AudioFileFormat instance containing information from the
header of the file passed in.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
/**
 * Gets an AudioFileFormat for a URL by opening its stream and delegating to
 * getAudioFileFormat(InputStream, long), passing the reported data length.
 */
@Override
public AudioFileFormat getAudioFileFormat(URL url)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(URL): begin (class: {0})", getClass().getSimpleName());
    long dataLength = getDataLength(url);
    AudioFileFormat format;
    // try-with-resources guarantees the URL stream is closed after header parsing.
    try (InputStream stream = url.openStream()) {
        format = getAudioFileFormat(stream, dataLength);
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(URL): end");
    return format;
} | Get an AudioFileFormat object for a URL. This method calls
getAudioFileFormat(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long).
@param url the URL to read from.
@return an AudioFileFormat instance containing information from the
header of the URL passed in.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
/**
 * Gets an AudioFileFormat for an InputStream of unknown length. Ensures the stream
 * supports mark/reset, parses the header, then rewinds the stream.
 */
@Override
public AudioFileFormat getAudioFileFormat(InputStream inputStream)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(InputStream): begin (class: {0})", getClass().getSimpleName());
    long unknownLength = AudioSystem.NOT_SPECIFIED;
    // Guarantee mark/reset support so the stream can be rewound after header parsing.
    if (!inputStream.markSupported()) {
        inputStream = new BufferedInputStream(inputStream, getMarkLimit());
    }
    inputStream.mark(getMarkLimit());
    AudioFileFormat format;
    try {
        format = getAudioFileFormat(inputStream, unknownLength);
    } finally {
        /* TODO: required semantics is unclear: should reset()
           be executed only when there is an exception or
           should it be done always?
        */
        inputStream.reset();
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioFileFormat(InputStream): end");
    return format;
} | Get an AudioFileFormat object for an InputStream. This method calls
getAudioFileFormat(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long).
@param inputStream the stream to read from.
@return an AudioFileFormat instance containing information from the
header of the stream passed in.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
/**
 * Gets an AudioInputStream for a file. The underlying stream is only closed on
 * failure; on success it is handed off inside the returned AudioInputStream.
 */
@Override
public AudioInputStream getAudioInputStream(File file)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(File): begin (class: {0})", getClass().getSimpleName());
    long fileLength = file.length();
    InputStream stream = new FileInputStream(file);
    AudioInputStream audioStream;
    try {
        audioStream = getAudioInputStream(stream, fileLength);
    } catch (UnsupportedAudioFileException | IOException e) {
        // Failure: we keep ownership of the stream, so close it before rethrowing.
        stream.close();
        throw e;
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(File): end");
    return audioStream;
} | Get an AudioInputStream object for a file. This method calls
getAudioInputStream(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long) and perhaps
override getAudioInputStream(InputStream, long).
@param file the File object to read from.
@return an AudioInputStream instance containing the audio data from this
file.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
/**
 * Gets an AudioInputStream for a URL. The underlying stream is only closed on
 * failure; on success it is handed off inside the returned AudioInputStream.
 */
@Override
public AudioInputStream getAudioInputStream(URL url)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(URL): begin (class: {0})", getClass().getSimpleName());
    long dataLength = getDataLength(url);
    InputStream stream = url.openStream();
    AudioInputStream audioStream;
    try {
        audioStream = getAudioInputStream(stream, dataLength);
    } catch (UnsupportedAudioFileException | IOException e) {
        // Failure: we keep ownership of the stream, so close it before rethrowing.
        stream.close();
        throw e;
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(URL): end");
    return audioStream;
} | Get an AudioInputStream object for a URL. This method calls
getAudioInputStream(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long) and perhaps
override getAudioInputStream(InputStream, long).
@param url the URL to read from.
@return an AudioInputStream instance containing the audio data from this
URL.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
@Override
public AudioInputStream getAudioInputStream(InputStream inputStream)
        throws UnsupportedAudioFileException, IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(InputStream): begin (class: {0})", getClass().getSimpleName());
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    AudioInputStream audioInputStream = null;
    // Guarantee mark/reset support so the stream can be rewound on failure.
    if (!inputStream.markSupported()) {
        inputStream = new BufferedInputStream(inputStream, getMarkLimit());
    }
    inputStream.mark(getMarkLimit());
    try {
        audioInputStream = getAudioInputStream(inputStream, lFileLengthInBytes);
    } catch (UnsupportedAudioFileException e) {
        // Unsupported format: rewind so another reader can try the same stream.
        inputStream.reset();
        throw e;
    } catch (IOException e) {
        try {
            inputStream.reset();
        } catch (IOException e2) {
            // If the reset itself fails, throw the reset failure with the original
            // I/O error attached as its cause — but only when e2 has no cause yet.
            // (When e2 already carries a cause, it is dropped and the original e
            // is thrown below instead.)
            if (e2.getCause() == null) {
                e2.initCause(e);
                throw e2;
            }
        }
        throw e;
    }
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(InputStream): end");
    return audioInputStream;
} | Get an AudioInputStream object for an InputStream. This method calls
getAudioInputStream(InputStream, long). Subclasses should not override
this method unless there are really severe reasons. Normally, it is
sufficient to implement getAudioFileFormat(InputStream, long) and perhaps
override getAudioInputStream(InputStream, long).
@param inputStream the stream to read from.
@return an AudioInputStream instance containing the audio data from this
stream.
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
// Internal implementation: parses the header via getAudioFileFormat(InputStream, long)
// and wraps the stream — positioned right after the header, or rewound to the start
// when isRereading() is true — in an AudioInputStream.
protected AudioInputStream getAudioInputStream(InputStream inputStream,
        long lFileLengthInBytes) throws UnsupportedAudioFileException,
        IOException {
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(InputStream, long): begin (class: {0})", getClass().getSimpleName());
    if (isRereading()) {
        // Re-reading mode: remember the current position so the header bytes can be
        // delivered again as part of the audio stream.
        if (!inputStream.markSupported()) {
            inputStream = new BufferedInputStream(inputStream,
                    getMarkLimit());
        }
        inputStream.mark(getMarkLimit());
    }
    AudioFileFormat audioFileFormat = getAudioFileFormat(inputStream,
            lFileLengthInBytes);
    if (isRereading()) {
        inputStream.reset();
    }
    AudioInputStream audioInputStream = new AudioInputStream(inputStream,
            audioFileFormat.getFormat(), audioFileFormat.getFrameLength());
    LOG.log(Level.FINE, "TAudioFileReader.getAudioInputStream(InputStream, long): end");
    return audioInputStream;
} | Get an AudioInputStream (internal implementation). This implementation
calls getAudioFileFormat() with the same arguments as passed in here.
Then, it constructs an AudioInputStream instance. This instance takes the
passed inputStream in the state it is left after getAudioFileFormat() did
its work. In other words, the implementation here assumes that
getAudioFileFormat() reads the entire header up to a position exactly
where the audio data starts. If this can't be realized for a certain
format, this method should be overridden.
@param inputStream The InputStream to read from. It should be tested if
it is markable. If not, and it is re-reading, wrap it into a
BufferedInputStream with getMarkLimit() size.
@param lFileLengthInBytes The size of the originating file, if known. If
it isn't known, AudioSystem.NOT_SPECIFIED should be passed. This value
may be used for byteLength in AudioFileFormat, if this value can't be
derived from the information in the file header.
@return
@throws javax.sound.sampled.UnsupportedAudioFileException
@throws java.io.IOException |
/**
 * Compiles a token tree into an operation flow, once; subsequent calls are no-ops.
 */
private synchronized void compile(Token tok) {
    if (this.operations != null) {
        // Already compiled.
        return;
    }
    this.numberOfClosures = 0;
    this.operations = this.compile(tok, null, false);
} | Compiles a token tree into an operation flow.
/**
 * Converts one token (and, recursively, its children) into a chain of Op nodes.
 *
 * @param tok     the token to compile
 * @param next    the operation to execute after this token's operations
 * @param reverse true when compiling for backward matching (look-behind), which
 *                flips the chaining order of concatenated children
 * @return the head operation of the compiled chain
 */
private Op compile(Token tok, Op next, boolean reverse) {
    Op ret;
    switch (tok.type) {
    case Token.DOT:
        ret = Op.createDot();
        ret.next = next;
        break;
    case Token.CHAR:
        ret = Op.createChar(tok.getChar());
        ret.next = next;
        break;
    case Token.ANCHOR:
        ret = Op.createAnchor(tok.getChar());
        ret.next = next;
        break;
    case Token.RANGE:
    case Token.NRANGE:
        ret = Op.createRange(tok);
        ret.next = next;
        break;
    case Token.CONCAT:
        // Chain children back-to-front (or front-to-back when reversed) so that
        // each child's ops point at the already-compiled remainder.
        ret = next;
        if (!reverse) {
            for (int i = tok.size() - 1; i >= 0; i--) {
                ret = compile(tok.getChild(i), ret, false);
            }
        } else {
            for (int i = 0; i < tok.size(); i++) {
                ret = compile(tok.getChild(i), ret, true);
            }
        }
        break;
    case Token.UNION:
        // Each alternative is compiled against the same continuation.
        Op.UnionOp uni = Op.createUnion(tok.size());
        for (int i = 0; i < tok.size(); i++) {
            uni.addElement(compile(tok.getChild(i), next, reverse));
        }
        ret = uni; // ret.next is null.
        break;
    case Token.CLOSURE:
    case Token.NONGREEDYCLOSURE:
        Token child = tok.getChild(0);
        int min = tok.getMin();
        int max = tok.getMax();
        if (min >= 0 && min == max) { // {n}
            // Fixed repetition: simply unroll the child n times.
            ret = next;
            for (int i = 0; i < min; i++) {
                ret = compile(child, ret, reverse);
            }
            break;
        }
        if (min > 0 && max > 0)
            max -= min;
        if (max > 0) {
            // X{2,6} -> XX(X(X(XX?)?)?)?
            ret = next;
            for (int i = 0; i < max; i++) {
                Op.ChildOp q = Op
                        .createQuestion(tok.type == Token.NONGREEDYCLOSURE);
                q.next = next;
                q.setChild(compile(child, ret, reverse));
                ret = q;
            }
        } else {
            // Unbounded repetition: the closure op loops back to itself.
            Op.ChildOp op;
            if (tok.type == Token.NONGREEDYCLOSURE) {
                op = Op.createNonGreedyClosure();
            } else { // Token.CLOSURE
                op = Op.createClosure(this.numberOfClosures++);
            }
            op.next = next;
            op.setChild(compile(child, op, reverse));
            ret = op;
        }
        if (min > 0) {
            // Mandatory prefix repetitions are unrolled in front of the closure.
            for (int i = 0; i < min; i++) {
                ret = compile(child, ret, reverse);
            }
        }
        break;
    case Token.EMPTY:
        ret = next;
        break;
    case Token.STRING:
        ret = Op.createString(tok.getString());
        ret.next = next;
        break;
    case Token.BACKREFERENCE:
        ret = Op.createBackReference(tok.getReferenceNumber());
        ret.next = next;
        break;
    case Token.PAREN:
        // Paren 0 is the whole expression (no capture ops needed). Otherwise,
        // negative capture numbers mark the closing side of a group; the side
        // order swaps when compiling in reverse.
        if (tok.getParenNumber() == 0) {
            ret = compile(tok.getChild(0), next, reverse);
        } else if (reverse) {
            next = Op.createCapture(tok.getParenNumber(), next);
            next = compile(tok.getChild(0), next, reverse);
            ret = Op.createCapture(-tok.getParenNumber(), next);
        } else {
            next = Op.createCapture(-tok.getParenNumber(), next);
            next = compile(tok.getChild(0), next, reverse);
            ret = Op.createCapture(tok.getParenNumber(), next);
        }
        break;
    case Token.LOOKAHEAD:
        ret = Op.createLook(Op.LOOKAHEAD, next,
                compile(tok.getChild(0), null, false));
        break;
    case Token.NEGATIVELOOKAHEAD:
        ret = Op.createLook(Op.NEGATIVELOOKAHEAD, next,
                compile(tok.getChild(0), null, false));
        break;
    case Token.LOOKBEHIND:
        // Look-behind bodies are compiled in reverse.
        ret = Op.createLook(Op.LOOKBEHIND, next,
                compile(tok.getChild(0), null, true));
        break;
    case Token.NEGATIVELOOKBEHIND:
        ret = Op.createLook(Op.NEGATIVELOOKBEHIND, next,
                compile(tok.getChild(0), null, true));
        break;
    case Token.INDEPENDENT:
        ret = Op.createIndependent(next,
                compile(tok.getChild(0), null, reverse));
        break;
    case Token.MODIFIERGROUP:
        ret = Op.createModifier(next,
                compile(tok.getChild(0), null, reverse),
                ((Token.ModifierToken) tok).getOptions(),
                ((Token.ModifierToken) tok).getOptionsMask());
        break;
    case Token.CONDITION:
        // (?(ref)yes|no) — both branches share the same continuation.
        Token.ConditionToken ctok = (Token.ConditionToken) tok;
        int ref = ctok.refNumber;
        Op condition = ctok.condition == null ? null : compile(
                ctok.condition, null, reverse);
        Op yes = compile(ctok.yes, next, reverse);
        Op no = ctok.no == null ? null : compile(ctok.no, next, reverse);
        ret = Op.createCondition(next, ref, condition, yes, no);
        break;
    default:
        throw new RuntimeException("Unknown token type: " + tok.type);
    } // switch (tok.type)
    return ret;
} | Converts a token to an operation.
/**
 * Checks whether the target text contains this pattern within the given range.
 *
 * @param start start offset of the range
 * @param end   end offset +1 of the range
 * @return true if the target is matched to this regular expression
 */
public boolean matches(String target, int start, int end) {
    // Delegate to the Match-capturing overload without a capture target.
    return this.matches(target, start, end, (Match) null);
} | Checks whether the <var>target</var> text <strong>contains</strong> this
pattern in specified range or not.
@param start
Start offset of the range.
@param end
End offset +1 of the range.
@return true if the target is matched to this regular expression. |
public boolean matches(String target, Match match) {
    // Match over the whole string, recording group boundaries in match.
    return matches(target, 0, target.length(), match);
} | Checks whether the <var>target</var> text <strong>contains</strong> this
pattern or not.
@param match
A Match instance for storing matching result.
@return true if the target text matches this pattern; false otherwise.
/**
 * Core matching entry point: checks whether the target text contains this
 * pattern, optionally recording group boundaries into {@code match}.
 * After lazy, thread-safe setup of the compiled operations and the shared
 * Context, one of several engines is selected: anchored whole-input match
 * (XML Schema mode), pure Boyer-Moore for fixed-string-only patterns, a
 * Boyer-Moore prefilter, a ".*"-prefix scan, a first-character filter,
 * or a plain scan over every candidate start offset.
 */
public boolean matches(CharacterIterator target, Match match) {
int start = target.getBeginIndex();
int end = target.getEndIndex();
// Lazily compile the operation tree and allocate the shared context.
synchronized (this) {
if (this.operations == null)
this.prepare();
if (this.context == null)
this.context = new Context();
}
// Reuse the shared context unless another thread is currently using it.
Context con = null;
synchronized (this.context) {
con = this.context.inuse ? new Context() : this.context;
con.reset(target, start, end, this.numberOfClosures);
}
if (match != null) {
match.setNumberOfGroups(this.nofparen);
match.setSource(target);
} else if (this.hasBackReferences) {
// Back-references need group bookkeeping even when the caller did
// not supply a Match instance.
match = new Match();
match.setNumberOfGroups(this.nofparen);
// Need not to call setSource() because
// a caller can not access this match instance.
}
con.match = match;
// XML Schema mode: the match must consume the entire input.
if (RegularExpression.isSet(this.options, XMLSCHEMA_MODE)) {
int matchEnd = this.match(con, this.operations, con.start, 1,
this.options);
// System.err.println("DEBUG: matchEnd="+matchEnd);
if (matchEnd == con.limit) {
if (con.match != null) {
con.match.setBeginning(0, con.start);
con.match.setEnd(0, matchEnd);
}
con.setInUse(false);
return true;
}
return false;
}
/*
 * The pattern has only fixed string. The engine uses Boyer-Moore.
 */
if (this.fixedStringOnly) {
// System.err.println("DEBUG: fixed-only: "+this.fixedString);
int o = this.fixedStringTable.matches(target, con.start, con.limit);
if (o >= 0) {
if (con.match != null) {
con.match.setBeginning(0, o);
con.match.setEnd(0, o + this.fixedString.length());
}
con.setInUse(false);
return true;
}
con.setInUse(false);
return false;
}
/*
 * The pattern contains a fixed string. The engine checks with
 * Boyer-Moore whether the text contains the fixed string or not. If
 * not, it return with false.
 */
if (this.fixedString != null) {
int o = this.fixedStringTable.matches(target, con.start, con.limit);
if (o < 0) {
// System.err.println("Non-match in fixed-string search.");
con.setInUse(false);
return false;
}
}
// No start beyond this limit can produce a match of minimal length.
int limit = con.limit - this.minlength;
int matchStart;
int matchEnd = -1;
/*
 * Checks whether the expression starts with ".*".
 */
if (this.operations != null && this.operations.type == Op.CLOSURE
&& this.operations.getChild().type == Op.DOT) {
if (isSet(this.options, SINGLE_LINE)) {
matchStart = con.start;
matchEnd = this.match(con, this.operations, con.start, 1,
this.options);
} else {
// Multi-line: only attempt a match right after a line start.
boolean previousIsEOL = true;
for (matchStart = con.start; matchStart <= limit; matchStart++) {
int ch = target.setIndex(matchStart);
if (isEOLChar(ch)) {
previousIsEOL = true;
} else {
if (previousIsEOL) {
if (0 <= (matchEnd = this.match(con,
this.operations, matchStart, 1,
this.options)))
break;
}
previousIsEOL = false;
}
}
}
}
/*
 * Optimization against the first character.
 */
else if (this.firstChar != null) {
// System.err.println("DEBUG: with firstchar-matching: "+this.firstChar);
RangeToken range = this.firstChar;
for (matchStart = con.start; matchStart <= limit; matchStart++) {
int ch = target.setIndex(matchStart);
// Combine surrogate pairs so the range check sees a code point.
if (REUtil.isHighSurrogate(ch) && matchStart + 1 < con.limit) {
ch = REUtil.composeFromSurrogates(ch,
target.setIndex(matchStart + 1));
}
if (!range.match(ch)) {
continue;
}
if (0 <= (matchEnd = this.match(con, this.operations,
matchStart, 1, this.options))) {
break;
}
}
}
/*
 * Straightforward matching.
 */
else {
for (matchStart = con.start; matchStart <= limit; matchStart++) {
if (0 <= (matchEnd = this.match(con, this.operations,
matchStart, 1, this.options)))
break;
}
}
if (matchEnd >= 0) {
if (con.match != null) {
con.match.setBeginning(0, matchStart);
con.match.setEnd(0, matchEnd);
}
con.setInUse(false);
return true;
} else {
con.setInUse(false);
return false;
}
} | Checks whether the <var>target</var> text <strong>contains</strong> this
pattern or not.
@param match
A Match instance for storing matching result.
@return true if the target text matches this pattern; false otherwise.
/**
 * Prepares for matching: compiles the token tree into the operation list
 * and derives the matching optimizations (minimum match length, a
 * first-character range filter, and a fixed-string Boyer-Moore table).
 * Called lazily just before the first match attempt.
 */
void prepare() {
if (Op.COUNT)
Op.nofinstances = 0;
this.compile(this.tokentree);
/*
 * if (this.operations.type == Op.CLOSURE &&
 * this.operations.getChild().type == Op.DOT) { // .* Op anchor =
 * Op.createAnchor(isSet(this.options, SINGLE_LINE) ? 'A' : '@');
 * anchor.next = this.operations; this.operations = anchor; }
 */
if (Op.COUNT)
System.err.println("DEBUG: The number of operations: "
+ Op.nofinstances);
this.minlength = this.tokentree.getMinLength();
// First-character optimization: compute the set of code points a match
// can start with, unless the option or XML Schema mode forbids it.
this.firstChar = null;
if (!isSet(this.options, PROHIBIT_HEAD_CHARACTER_OPTIMIZATION)
&& !isSet(this.options, XMLSCHEMA_MODE)) {
RangeToken firstChar = Token.createRange();
int fresult = this.tokentree.analyzeFirstCharacter(firstChar,
this.options);
if (fresult == Token.FC_TERMINAL) {
firstChar.compactRanges();
this.firstChar = firstChar;
if (DEBUG)
System.err
.println("DEBUG: Use the first character optimization: "
+ firstChar);
}
}
// A single STRING/CHAR op with no successor means the whole pattern is
// one literal: matching degenerates to a Boyer-Moore search.
if (this.operations != null
&& (this.operations.type == Op.STRING || this.operations.type == Op.CHAR)
&& this.operations.next == null) {
if (DEBUG)
System.err.print(" *** Only fixed string! *** ");
this.fixedStringOnly = true;
if (this.operations.type == Op.STRING)
this.fixedString = this.operations.getString();
else if (this.operations.getData() >= 0x10000) { // Op.CHAR
// Supplementary code point: store as a surrogate pair.
this.fixedString = REUtil.decomposeToSurrogates(this.operations
.getData());
} else {
char[] ac = new char[1];
ac[0] = (char) this.operations.getData();
this.fixedString = new String(ac);
}
this.fixedStringOptions = this.options;
this.fixedStringTable = new BMPattern(this.fixedString, 256, isSet(
this.fixedStringOptions, IGNORE_CASE));
} else if (!isSet(this.options, PROHIBIT_FIXED_STRING_OPTIMIZATION)
&& !isSet(this.options, XMLSCHEMA_MODE)) {
// Otherwise look for the longest fixed substring to use as a
// Boyer-Moore prefilter before running the full engine.
Token.FixedStringContainer container = new Token.FixedStringContainer();
this.tokentree.findFixedString(container, this.options);
this.fixedString = container.token == null ? null : container.token
.getString();
this.fixedStringOptions = container.options;
// Single-character strings are not worth a Boyer-Moore table.
if (this.fixedString != null && this.fixedString.length() < 2)
this.fixedString = null;
// This pattern has a fixed string of which length is more than one.
if (this.fixedString != null) {
this.fixedStringTable = new BMPattern(this.fixedString, 256,
isSet(this.fixedStringOptions, IGNORE_CASE));
if (DEBUG) {
System.err
.println("DEBUG: The longest fixed string: "
+ this.fixedString.length()
+ "/" // +this.fixedString
+ "/"
+ REUtil.createOptionString(this.fixedStringOptions));
System.err.print("String: ");
REUtil.dumpString(this.fixedString);
}
}
}
} | Prepares for matching. This method is called just before starting
matching. |
/**
 * Decodes a Vorbis mapping-type-0 configuration from the bitstream,
 * range-checking every field; on any out-of-range or reserved value the
 * partially built info is freed and null is returned (the Java analogue
 * of the reference implementation's "goto err_out").
 */
@Override
Object unpack(Info vi, Buffer opb) {
InfoMapping0 info = new InfoMapping0();
// Optional submap count (1 flag bit, then 4 bits, stored count-1).
if (opb.read(1) != 0) {
info.submaps = opb.read(4) + 1;
} else {
info.submaps = 1;
}
// Optional channel-coupling steps (1 flag bit, then 8 bits, count-1).
if (opb.read(1) != 0) {
info.coupling_steps = opb.read(8) + 1;
for (int i = 0; i < info.coupling_steps; i++) {
int testM = info.coupling_mag[i] = opb.read(Util.ilog2(vi.channels));
int testA = info.coupling_ang[i] = opb.read(Util.ilog2(vi.channels));
// Magnitude and angle must name two distinct, valid channels.
if (testM < 0 || testA < 0 || testM == testA || testM >= vi.channels
|| testA >= vi.channels) {
//goto err_out;
info.free();
return (null);
}
}
}
if (opb.read(2) > 0) { /* 2,3:reserved */
info.free();
return (null);
}
// With multiple submaps, each channel carries its submap index.
if (info.submaps > 1) {
for (int i = 0; i < vi.channels; i++) {
info.chmuxlist[i] = opb.read(4);
if (info.chmuxlist[i] >= info.submaps) {
info.free();
return (null);
}
}
}
// Per-submap time/floor/residue configuration indices, each validated
// against the counts declared in the stream's Info.
for (int i = 0; i < info.submaps; i++) {
info.timesubmap[i] = opb.read(8);
if (info.timesubmap[i] >= vi.times) {
info.free();
return (null);
}
info.floorsubmap[i] = opb.read(8);
if (info.floorsubmap[i] >= vi.floors) {
info.free();
return (null);
}
info.residuesubmap[i] = opb.read(8);
if (info.residuesubmap[i] >= vi.residues) {
info.free();
return (null);
}
}
return info;
} | also responsible for range checking
protected Attribute createAttribute(QName qname, Datatype datatype) {
    // Resolve the qname to its grammar-wide QNameContext, then wrap the
    // context together with the datatype in an Attribute event.
    QNameContext qnc = getQNameContext(qname.getNamespaceURI(),
            qname.getLocalPart(), grammarUriContexts);
    return new Attribute(qnc, datatype);
} | QName valueType
/**
 * Builds (once, then caches) the schema-informed element fragment grammar,
 * EXI 1.0 section 8.5.3 (http://www.w3.org/TR/exi/#informedElementFragGrammar).
 *
 * ElementFragment 1: SE(F0)..SE(Fm-1), SE(*), EE, CH[untyped].
 * ElementFragment 0: AT(A0)..AT(An-1), AT(*), then the same SE/EE/CH
 * productions, all SE transitions leading to ElementFragment 1.
 *
 * m is the number of unique element qnames declared in the schema and n
 * the number of unique attribute qnames, each sorted lexicographically
 * (local-name first, then uri). Declarations sharing a qname but not a
 * grammar are evaluated with the relaxed fragment grammar (elements) or
 * typed as String (attributes). Per 8.5.4.4 the result is treated as
 * created from a nillable element with a type-castable type, with
 * ElementFragmentTypeEmpty serving as its TypeEmpty grammar.
 *
 * @return the cached or freshly built element fragment grammar
 * @throws EXIException if element grammar construction fails
 */
protected SchemaInformedGrammar getSchemaInformedElementFragmentGrammar()
        throws EXIException {
    // Return the cached grammar before doing any work (previously the
    // unique-element map was rebuilt on every call, even on cache hits).
    if (elementFragment0 != null) {
        return elementFragment0;
    }
    // unique qname -> element declarations map
    Map<QName, List<XSElementDeclaration>> uniqueNamedElements = getUniqueNamedElements();
    // 8.5.3 Schema-informed Element Fragment Grammar
    SchemaInformedGrammar elementFragment1 = new SchemaInformedElement();
    elementFragment0 = new SchemaInformedFirstStartTag(elementFragment1);
    // F0 .. Fm-1: unique element qnames, sorted lexicographically.
    List<QName> uniqueNamedElementsList = new ArrayList<QName>(
            uniqueNamedElements.keySet());
    Collections.sort(uniqueNamedElementsList, qnameSort);
    // SE ( Fi ) ElementFragment 1
    for (int i = 0; i < uniqueNamedElementsList.size(); i++) {
        StartElement se = selectElementFragmentStartElement(
                uniqueNamedElementsList.get(i), uniqueNamedElements);
        elementFragment1.addProduction(se, elementFragment1);
    }
    // SE ( * ) ElementFragment 1 m
    elementFragment1.addProduction(START_ELEMENT_GENERIC, elementFragment1);
    // EE m+1
    elementFragment1.addTerminalProduction(END_ELEMENT);
    // CH [untyped value] ElementFragment 1 m+2
    elementFragment1.addProduction(CHARACTERS_GENERIC, elementFragment1);
    // A0 .. An-1: unique attribute qnames from the attribute pool plus all
    // globally declared attributes, sorted lexicographically.
    List<QName> uniqueNamedAttributeList = new ArrayList<QName>();
    Map<QName, List<XSAttributeDeclaration>> uniqueNamedAttributes = new HashMap<QName, List<XSAttributeDeclaration>>();
    for (XSAttributeDeclaration atDecl : attributePool.keySet()) {
        collectUniqueAttribute(atDecl, uniqueNamedAttributes,
                uniqueNamedAttributeList);
    }
    // add global attributes
    XSNamedMap nm = xsModel
            .getComponents(XSConstants.ATTRIBUTE_DECLARATION);
    for (int i = 0; i < nm.getLength(); i++) {
        collectUniqueAttribute((XSAttributeDeclaration) nm.item(i),
                uniqueNamedAttributes, uniqueNamedAttributeList);
    }
    Collections.sort(uniqueNamedAttributeList, qnameSort);
    // AT ( Ai ) [schema-typed value] ElementFragment 0
    for (int i = 0; i < uniqueNamedAttributeList.size(); i++) {
        Attribute at = selectElementFragmentAttribute(
                uniqueNamedAttributeList.get(i), uniqueNamedAttributes);
        elementFragment0.addProduction(at, elementFragment0);
    }
    // AT ( * ) ElementFragment 0 n
    elementFragment0.addProduction(ATTRIBUTE_GENERIC, elementFragment0);
    // SE ( Fi ) ElementFragment 1 n+1 ..
    for (int i = 0; i < uniqueNamedElementsList.size(); i++) {
        StartElement se = selectElementFragmentStartElement(
                uniqueNamedElementsList.get(i), uniqueNamedElements);
        elementFragment0.addProduction(se, elementFragment1);
    }
    // SE ( * ) ElementFragment 1 n+m+1
    elementFragment0.addProduction(START_ELEMENT_GENERIC, elementFragment1);
    // EE n+m+2
    elementFragment0.addTerminalProduction(END_ELEMENT);
    // CH [untyped value] ElementFragment 1 n+m+3
    elementFragment0.addProduction(CHARACTERS_GENERIC, elementFragment1);
    // ElementFragmentTypeEmpty: attribute uses/wildcard only, then EE.
    SchemaInformedGrammar elementFragmentEmpty1 = new SchemaInformedElement();
    SchemaInformedFirstStartTagGrammar elementFragmentEmpty0 = new SchemaInformedFirstStartTag(
            elementFragmentEmpty1);
    for (int i = 0; i < uniqueNamedAttributeList.size(); i++) {
        Attribute at = selectElementFragmentAttribute(
                uniqueNamedAttributeList.get(i), uniqueNamedAttributes);
        elementFragmentEmpty0.addProduction(at, elementFragmentEmpty0);
    }
    elementFragmentEmpty0.addProduction(ATTRIBUTE_GENERIC,
            elementFragmentEmpty0);
    elementFragmentEmpty0.addTerminalProduction(END_ELEMENT);
    // ElementFragmentTypeEmpty 1 : EE 0
    elementFragmentEmpty1.addTerminalProduction(END_ELEMENT);
    // 8.5.4.4 Undeclared Productions: treat as nillable, type-castable,
    // with ElementFragmentTypeEmpty as the TypeEmpty grammar.
    elementFragment0.setNillable(true);
    elementFragment0.setTypeEmpty(elementFragmentEmpty0);
    elementFragment0.setTypeCastable(true);
    return elementFragment0;
}

/**
 * Registers atDecl under its qname, creating the per-qname bucket (and
 * recording the qname for later sorting) on first occurrence.
 */
private void collectUniqueAttribute(XSAttributeDeclaration atDecl,
        Map<QName, List<XSAttributeDeclaration>> uniqueNamedAttributes,
        List<QName> uniqueNamedAttributeList) {
    QName atQname = new QName(atDecl.getNamespace(), atDecl.getName());
    List<XSAttributeDeclaration> list = uniqueNamedAttributes.get(atQname);
    if (list == null) {
        list = new ArrayList<XSAttributeDeclaration>();
        uniqueNamedAttributes.put(atQname, list);
        uniqueNamedAttributeList.add(atQname);
    }
    list.add(atDecl);
}

/**
 * Returns the StartElement event for qname fm: the element's own grammar
 * when all equally named declarations share one, otherwise the relaxed
 * element fragment grammar.
 */
private StartElement selectElementFragmentStartElement(QName fm,
        Map<QName, List<XSElementDeclaration>> uniqueNamedElements)
        throws EXIException {
    List<XSElementDeclaration> elements = uniqueNamedElements.get(fm);
    if (elements.size() == 1 || isSameElementGrammar(elements)) {
        return translatElementDeclarationToFSA(elements.get(0));
    }
    // content is evaluated according to the relaxed Element Fragment grammar
    StartElement se = createStartElement(fm);
    se.setGrammar(elementFragment0);
    return se;
}

/**
 * Returns the Attribute event for qname an: schema-typed when all equally
 * named declarations share one grammar, otherwise typed as String.
 */
private Attribute selectElementFragmentAttribute(QName an,
        Map<QName, List<XSAttributeDeclaration>> uniqueNamedAttributes) {
    List<XSAttributeDeclaration> attributes = uniqueNamedAttributes.get(an);
    if (attributes.size() == 1 || isSameAttributeGrammar(attributes)) {
        return getAttribute(attributes.get(0));
    }
    // differing types: value represented as a String
    return createAttribute(an, BuiltIn.getDefaultDatatype());
} | http://www.w3.org/TR/exi/#informedElementFragGrammar
/**
 * Returns true for namespace URIs other than the four well-known entries
 * (empty, xml, xsi, xsd) that the EXI string table always contains; only
 * "additional" URIs need to be pre-populated explicitly.
 *
 * @param namespaceURI non-null namespace URI to classify
 * @return true if the URI is not one of the four initial entries
 */
protected static boolean isAdditionalNamespace(String namespaceURI) {
    assert (namespaceURI != null);
    // The four well-known URIs are always present, hence not "additional".
    return !(namespaceURI.equals(Constants.XML_NULL_NS_URI)
            || namespaceURI.equals(Constants.XML_NS_URI)
            || namespaceURI.equals(Constants.XML_SCHEMA_INSTANCE_NS_URI)
            || namespaceURI.equals(Constants.XML_SCHEMA_NS_URI));
} | "http://www.w3.org/2001/XMLSchema"
protected void addLocalNameStringEntry(String namespaceURI, String localName) {
    // Make sure the namespace partition exists, then record the local
    // name exactly once (string-table pre-population is duplicate-free).
    List<String> localNames = addNamespaceStringEntry(namespaceURI);
    boolean alreadyKnown = localNames.contains(localName);
    if (!alreadyKnown) {
        localNames.add(localName);
    }
} | /*
When a schema is provided, the string table (Local-name) is also
pre-populated with the local name of each attribute, element and type
declared in the schema, partitioned by namespace URI and sorted
lexicographically. |
/**
 * A type is "type-castable" iff it has named sub-types in the schema or is
 * a simple type whose {variety} is union.
 *
 * @param td type definition to classify
 * @return true if td is type-castable in the EXI sense
 */
protected boolean isTypeCastable(XSTypeDefinition td) {
    // has named sub-types?
    XSNamedMap types = this.xsModel
            .getComponents(XSConstants.TYPE_DEFINITION);
    for (int i = 0; i < types.getLength(); i++) {
        XSTypeDefinition td2 = (XSTypeDefinition) types.item(i);
        if (td.equals(getBaseType(td2))) {
            // one derived type suffices; no need to scan the rest
            return true;
        }
    }
    // is a simple type definition of which {variety} is union?
    if (td.getTypeCategory() == XSTypeDefinition.SIMPLE_TYPE) {
        XSSimpleTypeDefinition std = (XSSimpleTypeDefinition) td;
        return std.getVariety() == XSSimpleTypeDefinition.VARIETY_UNION;
    }
    return false;
} | }
protected SchemaInformedFirstStartTagGrammar translateTypeDefinitionToFSA(
        XSTypeDefinition td) throws EXIException {
    QName typeName = null;
    // Named types are cached: re-use a previously built grammar, if any.
    if (!td.getAnonymous()) {
        typeName = new QName(td.getNamespace(), td.getName());
        SchemaInformedFirstStartTagGrammar cached = grammarTypes.get(typeName);
        if (cached != null) {
            return cached;
        }
    }
    // Build the grammar according to the type category.
    SchemaInformedFirstStartTagGrammar type_i;
    if (td.getTypeCategory() == XSTypeDefinition.COMPLEX_TYPE) {
        type_i = translateComplexTypeDefinitionToFSA((XSComplexTypeDefinition) td);
    } else {
        assert (td.getTypeCategory() == XSTypeDefinition.SIMPLE_TYPE);
        type_i = translateSimpleTypeDefinitionToFSA((XSSimpleTypeDefinition) td);
    }
    if (!td.getAnonymous()) {
        // add to localName table for string table pre-population
        addLocalNameStringEntry(td.getNamespace(), td.getName());
        grammarTypes.put(typeName, type_i);
    }
    return type_i;
} | Given an XML Schema type definition T i , two type grammars are created,
which are denoted by Type i and TypeEmpty i . Type i is a grammar that
fully reflects the type definition of T i , whereas TypeEmpty i is a
grammar that accepts only the attribute uses and attribute wildcards of T
i , if any.
@param td
type definition
@return schema-informed first start tag grammar
@throws EXIException
EXI exception |
/**
 * Builds Type_i / TypeEmpty_i grammars for a complex type definition:
 * Type_i reflects the full content model plus attribute uses, while
 * TypeEmpty_i accepts only the attribute uses and wildcard followed by EE.
 */
protected SchemaInformedFirstStartTagGrammar translateComplexTypeDefinitionToFSA(
XSComplexTypeDefinition ctd) throws EXIException {
// /*
// * anyType is special
// */
// if (Constants.XSD_ANY_TYPE.equals(ctd.getName())
// && XMLConstants.W3C_XML_SCHEMA_NS_URI
// .equals(ctd.getNamespace())) {
// // ur-type
// SchemaInformedFirstStartTagGrammar urType = getUrTypeRule();
// return urType;
// }
/*
 * Rule Content
 */
SchemaInformedGrammar ruleContent = null;
switch (ctd.getContentType()) {
case XSComplexTypeDefinition.CONTENTTYPE_EMPTY:
// Represents an empty content type.
// A content type with the distinguished value empty validates
// elements
// with no character or element information item children.
// (attributes only, no content allowed)
ruleContent = new SchemaInformedElement();
ruleContent.addTerminalProduction(END_ELEMENT);
break;
case XSComplexTypeDefinition.CONTENTTYPE_SIMPLE:
// Represents a simple content type.
// A content type which is simple validates elements with
// character-only children.
XSSimpleTypeDefinition std = ctd.getSimpleType();
ruleContent = translateSimpleTypeDefinitionToFSA(std);
break;
case XSComplexTypeDefinition.CONTENTTYPE_ELEMENT:
// Represents an element-only content type.
// An element-only content type validates elements with children
// that conform to the supplied content model.
// The {content model} of a complex type definition is a single
// particle
// NOTE: this declaration is intentionally reused by the default
// (mixed) case below - Java switch cases share a single scope.
boolean isMixedContent = false;
ruleContent = handleParticle(ctd, isMixedContent);
break;
default:
assert (ctd.getContentType() == XSComplexTypeDefinition.CONTENTTYPE_MIXED);
// Represents a mixed content type
// The {content model} of a complex type definition is a single
// particle
isMixedContent = true;
ruleContent = handleParticle(ctd, isMixedContent);
break;
}
// create copy of Element_i_content --> Element_i_content_2
// (used for content schema-deviations in start-tags, direct
// jumps)
SchemaInformedGrammar ruleContent2 = ruleContent.duplicate();
if (ruleContent2 instanceof SchemaInformedStartTagGrammar) {
// --> copy to schema informed elements grammar
SchemaInformedElement sie = new SchemaInformedElement();
for (int i = 0; i < ruleContent2.getNumberOfEvents(); i++) {
Production p = ruleContent2.getProduction(i);
sie.addProduction(p.getEvent(), p.getNextGrammar());
}
ruleContent2 = sie;
}
// attributes
XSObjectList attributes = ctd.getAttributeUses();
XSWildcard attributeWC = ctd.getAttributeWildcard();
// boolean isTypeCastable = isTypeCastable(ctd);
// type_i (start tag)
SchemaInformedStartTagGrammar sistr = handleAttributes(ruleContent,
ruleContent2, attributes, attributeWC);
SchemaInformedFirstStartTagGrammar type_i = new SchemaInformedFirstStartTag(
sistr);
// type_i.setTypeCastable(isTypeCastable);
// typeEmpty_i: attributes followed by an immediate EE.
SchemaInformedGrammar ruleEnd = new SchemaInformedElement();
ruleEnd.addTerminalProduction(END_ELEMENT);
SchemaInformedFirstStartTagGrammar typeEmpty_i = new SchemaInformedFirstStartTag(
handleAttributes(ruleEnd, ruleEnd, attributes, attributeWC));
// typeEmpty_i.setTypeCastable(isTypeCastable);
type_i.setTypeEmpty(typeEmpty_i);
return type_i;
// return ruleContent;
} | Given an XML Schema type definition T i , two type grammars are created,
which are denoted by Type i and TypeEmpty i . Type i is a grammar that
fully reflects the type definition of T i , whereas TypeEmpty i is a
grammar that accepts only the attribute uses and attribute wildcards of T
i , if any.
@param ctd
complex type definition
@return schema-informed first start tag grammar
@throws EXIException
EXI exception |
protected SchemaInformedFirstStartTagGrammar translateSimpleTypeDefinitionToFSA(
        XSSimpleTypeDefinition std) throws EXIException {
    /*
     * Simple content: one schema-valid characters event, then EE.
     */
    Characters schemaValidCh = new Characters(getDatatype(std));
    SchemaInformedElement simpleContent = new SchemaInformedElement();
    simpleContent.addProduction(schemaValidCh, SIMPLE_END_ELEMENT_RULE);
    // Type i: attribute handling (none declared here) in front of the
    // simple content; TypeEmpty is the shared empty-element grammar.
    SchemaInformedFirstStartTagGrammar type_i = new SchemaInformedFirstStartTag(
            handleAttributes(simpleContent, simpleContent, null, null));
    type_i.setTypeEmpty(SIMPLE_END_ELEMENT_EMPTY_RULE);
    return type_i;
} | protected SchemaInformedElement translateSimpleTypeDefinitionToFSA(
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat) {
    // Locate the first registered source format matching the request; its
    // per-encoding map determines the supported target formats.
    for (Entry<AudioFormat, Map<AudioFormat.Encoding, Collection<AudioFormat>>> entry
            : m_targetFormatsFromSourceFormat.entrySet()) {
        if (!AudioFormats.matches(entry.getKey(), sourceFormat)) {
            continue;
        }
        Collection<AudioFormat> targetFormats = entry.getValue().get(targetEncoding);
        return (targetFormats == null)
                ? EMPTY_FORMAT_ARRAY
                : targetFormats.toArray(EMPTY_FORMAT_ARRAY);
    }
    // Source format not registered at all.
    return EMPTY_FORMAT_ARRAY;
} | /*
public boolean isConversionSupported(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat)
{
return isAllowedSourceFormat(sourceFormat) &&
isTargetEncodingSupported(targetEncoding);
} |
/**
 * Initializes all the jOrbis and jOgg state used for playback; must run
 * before the first dataReady() call.
 */
private void init_jorbis() {
// Fresh Ogg-layer state.
oggSyncState_ = new SyncState();
oggStreamState_ = new StreamState();
oggPage_ = new Page();
oggPacket_ = new Packet();
// Fresh Vorbis-layer state; the Block must be bound to the DSP state.
vorbisInfo = new Info();
vorbisComment = new Comment();
vorbisDspState = new DspState();
vorbisBlock = new Block(vorbisDspState);
// Reset read-buffer bookkeeping.
buffer = null;
bytes = 0;
currentBytes = 0L;
oggSyncState_.init();
} | Initializes all the jOrbis and jOgg vars that are used for song playback.
/**
 * State-machine main loop, invoked repeatedly by the SPI layer: reads Ogg
 * pages, decodes Vorbis packets and writes PCM into the circular buffer.
 * States: NeedHeaders -> ReadData <-> WriteData, with BufferFull pausing
 * until the channel drains and Corrupt/Done tearing everything down.
 */
@Override
public void dataReady() {
// This first switch is trace-only: it logs the current state.
switch (playState) {
case playState_NeedHeaders:
LOG.log(Level.FINE, "playState = playState_NeedHeaders");
break;
case playState_ReadData:
LOG.log(Level.FINE, "playState = playState_ReadData");
break;
case playState_WriteData:
LOG.log(Level.FINE, "playState = playState_WriteData");
break;
case playState_Done:
LOG.log(Level.FINE, "playState = playState_Done");
break;
case playState_BufferFull:
LOG.log(Level.FINE, "playState = playState_BufferFull");
break;
case playState_Corrupt:
LOG.log(Level.FINE, "playState = playState_Corrupt");
break;
}
// This code was developed by the jCraft group, as JOrbisPlayer.java, slightly
// modified by jOggPlayer developer and adapted by JavaZOOM to suit the JavaSound
// SPI. Then further modified by Tom Kimpton to correctly play ogg files that
// would hang the player.
switch (playState) {
case playState_NeedHeaders:
try {
// Headers (+ Comments).
readHeaders();
} catch (IOException ioe) {
playState = playState_Corrupt;
return;
}
playState = playState_ReadData;
break;
case playState_ReadData:
int result;
// Pull the next chunk of the Ogg stream into the sync buffer.
index = oggSyncState_.buffer(bufferSize_);
buffer = oggSyncState_.data;
bytes = readFromStream(buffer, index, bufferSize_);
LOG.log(Level.FINE, "More data : {0}", bytes);
if (bytes == -1) {
playState = playState_Done;
LOG.log(Level.FINE, "Ogg Stream empty. Settings playState to playState_Done.");
break;
} else {
oggSyncState_.wrote(bytes);
if (bytes == 0) {
// No data arrived: finish only if end-of-stream was flagged.
if ((oggPage_.eos() != 0) || (oggStreamState_.e_o_s != 0) || (oggPacket_.e_o_s != 0)) {
LOG.log(Level.FINE, "oggSyncState wrote 0 bytes: settings playState to playState_Done.");
playState = playState_Done;
}
LOG.log(Level.FINE, "oggSyncState wrote 0 bytes: but stream not yet empty.");
break;
}
}
result = oggSyncState_.pageout(oggPage_);
if (result == 0) {
LOG.log(Level.FINE, "Setting playState to playState_ReadData.");
playState = playState_ReadData;
break;
} // need more data
if (result == -1) { // missing or corrupt data at this page position
LOG.log(Level.FINE, "Corrupt or missing data in bitstream; setting playState to playState_ReadData");
playState = playState_ReadData;
break;
}
oggStreamState_.pagein(oggPage_);
LOG.log(Level.FINE, "Setting playState to playState_WriteData.");
playState = playState_WriteData;
break;
case playState_WriteData:
// Decoding !
LOG.log(Level.FINE, "Decoding");
label:
while (true) {
result = oggStreamState_.packetout(oggPacket_);
switch (result) {
case 0:
LOG.log(Level.FINE, "Packetout returned 0, going to read state.");
playState = playState_ReadData;
break label;
case -1:
// missing or corrupt data at this page position
// no reason to complain; already complained above
LOG.log(Level.FINE, "Corrupt or missing data in packetout bitstream; going to read state...");
// playState = playState_ReadData;
// break;
// continue;
break;
default:
// we have a packet. Decode it
if (vorbisBlock.synthesis(oggPacket_) == 0) { // test for success!
vorbisDspState.synthesis_blockin(vorbisBlock);
} else {
//if(TDebug.TraceAudioConverter) TDebug.out("vorbisBlock.synthesis() returned !0, going to read state");
LOG.log(Level.FINE, "VorbisBlock.synthesis() returned !0, continuing.");
continue;
}
// Flush decoded PCM; may flip state to BufferFull.
outputSamples();
if (playState == playState_BufferFull) {
return;
}
break;
}
} // while(true)
if (oggPage_.eos() != 0) {
LOG.log(Level.FINE, "Settings playState to playState_Done.");
playState = playState_Done;
}
break;
case playState_BufferFull:
continueFromBufferFull();
break;
case playState_Corrupt:
LOG.log(Level.FINE, "Corrupt Song.");
// drop through to playState_Done...
case playState_Done:
// Release decoder state and close the input / output channels.
oggStreamState_.clear();
vorbisBlock.clear();
vorbisDspState.clear();
vorbisInfo.clear();
oggSyncState_.clear();
LOG.log(Level.FINE, "Done Song.");
try {
if (oggBitStream_ != null) {
oggBitStream_.close();
}
getCircularBuffer().close();
} catch (Exception e) {
LOG.log(Level.FINE, e.getMessage());
}
break;
} // switch
} | Main loop.
/**
 * Drains decoded PCM from the Vorbis DSP state, converting float samples
 * to 16-bit signed little-endian interleaved bytes in convbuffer and
 * writing them to the circular buffer. If the buffer cannot accept a full
 * block, switches to playState_BufferFull and returns so the channel can
 * drain; otherwise ends in playState_ReadData.
 */
private void outputSamples() {
int samples;
while ((samples = vorbisDspState.synthesis_pcmout(_pcmf, _index)) > 0) {
float[][] pcmf = _pcmf[0];
bout = (samples < convsize ? samples : convsize);
// convert doubles to 16 bit signed ints (host order) and
// interleave
// NOTE(review): the loop variable i is an instance field shared
// with readHeaders(), not a local; readHeaders() re-initializes it
// before use, but confirm before converting it to a local.
for (i = 0; i < vorbisInfo.channels; i++) {
int pointer = i * 2;
//int ptr=i;
int mono = _index[i];
for (int j = 0; j < bout; j++) {
// Scale to 16-bit range and clamp to [-32768, 32767].
double fVal = pcmf[i][mono + j] * 32767.;
int val = (int) (fVal);
if (val > 32767) {
val = 32767;
}
if (val < -32768) {
val = -32768;
}
if (val < 0) {
val = val | 0x8000;
}
// Little-endian byte order, channels interleaved.
convbuffer[pointer] = (byte) (val);
convbuffer[pointer + 1] = (byte) (val >>> 8);
pointer += 2 * (vorbisInfo.channels);
}
}
LOG.log(Level.FINE, "about to write: {0}", 2 * vorbisInfo.channels * bout);
if (getCircularBuffer().availableWrite() < 2 * vorbisInfo.channels * bout) {
LOG.log(Level.FINE, "Too much data in this data packet, better return, let the channel drain, and try again...");
playState = playState_BufferFull;
return;
}
getCircularBuffer().write(convbuffer, 0, 2 * vorbisInfo.channels * bout);
if (bytes < bufferSize_) {
LOG.log(Level.FINE, "Finished with final buffer of music?");
}
// Tell the decoder how many samples were actually consumed.
if (vorbisDspState.synthesis_read(bout) != 0) {
LOG.log(Level.FINE, "VorbisDspState.synthesis_read returned -1.");
}
} // while(samples...)
playState = playState_ReadData;
} | This routine was extracted so that when the output buffer fills up, we
can break out of the loop, let the music channel drain, then continue
from where we were. |
/**
 * Reads and validates the Vorbis header packets (identification, then the
 * comment and setup headers) from the Ogg stream and initializes the DSP
 * state, the conversion buffer size and the PCM index arrays.
 *
 * @throws IOException if the stream is not an Ogg/Vorbis bitstream, is
 *             truncated, or a header packet is corrupt
 */
private void readHeaders() throws IOException {
LOG.log(Level.FINE, "readHeaders(");
// First chunk: must contain the initial Ogg page.
index = oggSyncState_.buffer(bufferSize_);
buffer = oggSyncState_.data;
bytes = readFromStream(buffer, index, bufferSize_);
if (bytes == -1) {
LOG.log(Level.FINE, "Cannot get any data from selected Ogg bitstream.");
throw new IOException("Cannot get any data from selected Ogg bitstream.");
}
oggSyncState_.wrote(bytes);
if (oggSyncState_.pageout(oggPage_) != 1) {
if (bytes < bufferSize_) {
throw new IOException("EOF");
}
LOG.log(Level.FINE, "Input does not appear to be an Ogg bitstream.");
throw new IOException("Input does not appear to be an Ogg bitstream.");
}
oggStreamState_.init(oggPage_.serialno());
vorbisInfo.init();
vorbisComment.init();
if (oggStreamState_.pagein(oggPage_) < 0) {
// error; stream version mismatch perhaps
LOG.log(Level.FINE, "Error reading first page of Ogg bitstream data.");
throw new IOException("Error reading first page of Ogg bitstream data.");
}
if (oggStreamState_.packetout(oggPacket_) != 1) {
// no page? must not be vorbis
LOG.log(Level.FINE, "Error reading initial header packet.");
throw new IOException("Error reading initial header packet.");
}
if (vorbisInfo.synthesis_headerin(vorbisComment, oggPacket_) < 0) {
// error case; not a vorbis header
LOG.log(Level.FINE, "This Ogg bitstream does not contain Vorbis audio data.");
throw new IOException("This Ogg bitstream does not contain Vorbis audio data.");
}
//int i = 0;
// NOTE(review): i is an instance field, also used by outputSamples().
// Collect the two remaining headers (comment + setup).
i = 0;
while (i < 2) {
while (i < 2) {
int result = oggSyncState_.pageout(oggPage_);
if (result == 0) {
break;
} // Need more data
if (result == 1) {
oggStreamState_.pagein(oggPage_);
while (i < 2) {
result = oggStreamState_.packetout(oggPacket_);
if (result == 0) {
break;
}
if (result == -1) {
LOG.log(Level.FINE, "Corrupt secondary header. Exiting.");
throw new IOException("Corrupt secondary header. Exiting.");
}
vorbisInfo.synthesis_headerin(vorbisComment, oggPacket_);
i++;
}
}
}
// Refill the sync buffer for the next header page.
index = oggSyncState_.buffer(bufferSize_);
buffer = oggSyncState_.data;
bytes = readFromStream(buffer, index, bufferSize_);
if (bytes == -1) {
break;
}
if (bytes == 0 && i < 2) {
LOG.log(Level.FINE, "End of file before finding all Vorbis headers!");
throw new IOException("End of file before finding all Vorbis headers!");
}
oggSyncState_.wrote(bytes);
}
// Log the user comments embedded in the stream (trailing NUL stripped).
byte[][] ptr = vorbisComment.user_comments;
for (byte[] ptr1 : ptr) {
if (ptr1 == null) {
break;
}
String currComment = (new String(ptr1, 0, ptr1.length - 1, Charset.forName("US-ASCII"))).trim();
LOG.log(Level.FINE, "Comment: {0}", currComment);
}
// Per-channel conversion buffer size; initialize decoder work areas.
convsize = bufferSize_ / vorbisInfo.channels;
vorbisDspState.synthesis_init(vorbisInfo);
vorbisBlock.init(vorbisDspState);
_pcmf = new float[1][][];
_index = new int[vorbisInfo.channels];
} | Reads headers and comments.
private int readFromStream(byte[] buffer, int index, int bufferSize_) {
int bytes;
try {
bytes = oggBitStream_.read(buffer, index, bufferSize_);
} catch (Exception e) {
LOG.log(Level.FINE, "Cannot Read Selected Song");
bytes = -1;
}
currentBytes = currentBytes + bytes;
return bytes;
} | Reads from the oggBitStream_ a specified number of Bytes(bufferSize_)
worth starting at index and puts them in the specified buffer[].
@param buffer
@param index
@param bufferSize_
@return the number of bytes read or -1 if error. |
Token parseRegex() throws ParseException {
Token tok = this.parseTerm();
Token parent = null;
while (this.read() == T_OR) {
this.next(); // '|'
if (parent == null) {
parent = Token.createUnion();
parent.addChild(tok);
tok = parent;
}
tok.addChild(this.parseTerm());
}
return tok;
} | regex ::= term (`|` term)*
term ::= factor+
factor ::= ('^' | '$' | '\A' | '\Z' | '\z' | '\b' | '\B' | '\<' | '\>'
| atom (('*' | '+' | '?' | minmax ) '?'? )?)
| '(?=' regex ')' | '(?!' regex ')' | '(?<=' regex ')' | '(?<!' regex ')'
atom ::= char | '.' | range | '(' regex ')' | '(?:' regex ')' | '\' [0-9]
| '\w' | '\W' | '\d' | '\D' | '\s' | '\S' | category-block |
Token parseTerm() throws ParseException {
int ch = this.read();
Token tok = null;
if (ch == T_OR || ch == T_RPAREN || ch == T_EOF) {
tok = Token.createEmpty();
} else {
tok = this.parseFactor();
Token concat = null;
while ((ch = this.read()) != T_OR && ch != T_RPAREN && ch != T_EOF) {
if (concat == null) {
concat = Token.createConcat();
concat.addChild(tok);
tok = concat;
}
concat.addChild(this.parseFactor());
// tok = Token.createConcat(tok, this.parseFactor());
}
}
if (ch == T_RPAREN) {
parenCount--;
}
return tok;
} | term ::= factor+ |
/**
 * Parses one factor of a term:
 * <pre>
 * factor ::= ('^' | '$' | '\A' | '\Z' | '\z' | '\b' | '\B' | '\&lt;' | '\&gt;'
 *             | atom (('*' | '+' | '?' | minmax ) '?'? )?)
 *             | '(?=' regex ')' | '(?!' regex ')' | '(?&lt;=' regex ')' | '(?&lt;!' regex ')'
 *             | '(?#' [^)]* ')'
 * minmax ::= '{' min (',' max?)? '}'
 * min    ::= [0-9]+
 * max    ::= [0-9]+
 * </pre>
 * Anchors, lookarounds and comments are dispatched to their dedicated
 * processors; everything else is parsed as an atom with an optional
 * postfix quantifier. The {min,max} form is scanned by hand below.
 *
 * @return the token tree for the parsed factor
 * @throws ParseException on a malformed quantifier or atom
 */
Token parseFactor() throws ParseException {
    int ch = this.read();
    Token tok;
    switch (ch) {
    case T_CARET:
        return this.processCaret();
    case T_DOLLAR:
        return this.processDollar();
    case T_LOOKAHEAD:
        return this.processLookahead();
    case T_NEGATIVELOOKAHEAD:
        return this.processNegativelookahead();
    case T_LOOKBEHIND:
        return this.processLookbehind();
    case T_NEGATIVELOOKBEHIND:
        return this.processNegativelookbehind();
    case T_COMMENT:
        this.next();
        return Token.createEmpty();
    case T_BACKSOLIDUS:
        switch (this.chardata) {
        case 'A':
            return this.processBacksolidus_A();
        case 'Z':
            return this.processBacksolidus_Z();
        case 'z':
            return this.processBacksolidus_z();
        case 'b':
            return this.processBacksolidus_b();
        case 'B':
            return this.processBacksolidus_B();
        case '<':
            return this.processBacksolidus_lt();
        case '>':
            return this.processBacksolidus_gt();
        }
        // fall through: any other escape is handled as an ordinary atom below
    }
    tok = this.parseAtom();
    ch = this.read();
    switch (ch) {
    case T_STAR:
        return this.processStar(tok);
    case T_PLUS:
        return this.processPlus(tok);
    case T_QUESTION:
        return this.processQuestion(tok);
    case T_CHAR:
        // A literal '{' may open a {min,max} quantifier; scan it manually.
        if (this.chardata == '{' && this.offset < this.regexlen) {
            int off = this.offset; // this.offset -> next of '{'
            int min = 0, max = -1;
            if ((ch = this.regex.charAt(off++)) >= '0' && ch <= '9') {
                min = ch - '0';
                while (off < this.regexlen
                        && (ch = this.regex.charAt(off++)) >= '0'
                        && ch <= '9') {
                    min = min * 10 + ch - '0';
                    if (min < 0) // int overflow while accumulating digits
                        throw ex("parser.quantifier.5", this.offset);
                }
            } else {
                // '{' not followed by a digit: invalid quantifier.
                throw ex("parser.quantifier.1", this.offset);
            }
            max = min;
            if (ch == ',') {
                if (off >= this.regexlen) {
                    throw ex("parser.quantifier.3", this.offset);
                } else if ((ch = this.regex.charAt(off++)) >= '0'
                        && ch <= '9') {
                    max = ch - '0'; // {min,max}
                    while (off < this.regexlen
                            && (ch = this.regex.charAt(off++)) >= '0'
                            && ch <= '9') {
                        max = max * 10 + ch - '0';
                        if (max < 0) // int overflow while accumulating digits
                            throw ex("parser.quantifier.5", this.offset);
                    }
                    if (min > max)
                        throw ex("parser.quantifier.4", this.offset);
                } else { // assume {min,} - unbounded upper limit
                    max = -1;
                }
            }
            if (ch != '}')
                throw ex("parser.quantifier.2", this.offset);
            // A trailing '?' after '}' makes the closure non-greedy.
            if (this.checkQuestion(off)) { // off -> next of '}'
                tok = Token.createNGClosure(tok);
                this.offset = off + 1;
            } else {
                tok = Token.createClosure(tok);
                this.offset = off;
            }
            tok.setMin(min);
            tok.setMax(max);
            this.next();
        }
    }
    return tok;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.