_id: string (lengths 2–6)
partition: string (3 classes)
text: string (lengths 4–46k)
language: string (1 class)
title: string (1 class)
d5201
train
There could be multiple reasons why this query is failing You are using '''+@Email+''' SQL server changes '' into ' which means when you pass ''' it converts to '' If you want to pass a string variable in you query it should be like WHERE Email = '" + Update.Email + "' This way it be passed as string Second option is you could send all the parameter in procedure and according to data can execute you query, It will be much easier to maintain and understand in future A: I found a solution to my issue. Based on suggested feedback I came up with this: try { foreach (Base_Item item in Game_Data._Item_Drop) { //Recipe if (item.Name.Contains("Recipe")) { if (Update_Recipe(item.Name)) { uri = Game_Data._EndPoint + "value"; Update.Value_1 = "1";//Update.Value_1 + "UPDATE Skill_Gemology SET " + item.Name + "= 1 WHERE Email = ''" + Update.Email + "'' "; Update.Value_2 = "Skill_Gemology"; Update.Value_3 = item.Name; } } //Rune else if (item.Name.Contains("Rune") || item.Name == "Polished_Ancient_Stone" || item.Name == "Ancient_Stone") { uri = Game_Data._EndPoint + "value"; Update.Value_1 = item.Count.ToString();//Update.Value_1 + "UPDATE [dbo].[Inventory_Runes] SET [" + item.Name + "] = [" + item.Name + "] + " + item.Count.ToString() + " WHERE Email = ''" + Update.Email + "''"; Update.Value_2 = item.Name; } StartCoroutine(booleanwebrequest(uri, Update)); Debug.Log(Update); } } catch (Exception ex) { Debug.Log(ex); } API Backend: public bool Save_Item_Drop(string Email, string value, string column) { try { SqlConnection SQLConn = new SqlConnection(cn); SqlCommand cmd = new SqlCommand("spHere", SQLConn); bool Success; cmd.CommandType = CommandType.StoredProcedure; cmd.Parameters.AddWithValue("@Email", Email); cmd.Parameters.AddWithValue("@Value", value); cmd.Parameters.AddWithValue("@Column", column); SQLConn.Open(); Success = Convert.ToBoolean(cmd.ExecuteScalar()); SQLConn.Close(); return Success; } catch (Exception ex) { string _Product = "White Box Gaming API"; dynamic _Method = Convert.ToString(System.Reflection.MethodBase.GetCurrentMethod().Name); dynamic _Class = Convert.ToString(this.GetType().Name); string _Exception = Convert.ToString(ex.ToString()); Log_Product_Exception(_Product, _Class, _Method, _Exception); return false; } } SQL: ALTER PROCEDURE [dbo].[spHere] @Email AS NVARCHAR(500), @Value AS NVARCHAR(1000), @Column As NVARCHAR(1000) AS BEGIN DECLARE @Response AS BIT = 0 DECLARE @Statement AS NVARCHAR(MAX) SET @Statement = 'UPDATE Inventory_Runes SET ' + @Column + ' = ' + @Column + ' + ' + @Value + ' WHERE Email = ''' + @Email + '''' -------Pre Check----------------------------------------------------------------------------------------------------- --Runes IF Not EXISTS(SELECT Email FROM [dbo].[Inventory_Runes] WHERE Email = @Email) BEGIN INSERT INTO [dbo].[Inventory_Runes] (Email) VALUES (@Email) END BEGIN TRY EXECUTE sp_executesql @Statement SET @Response = 1 END TRY BEGIN CATCH SET @Response = 0 END CATCH SELECT @Response END
unknown
d5202
train
Laravel Eloquent has two methods for this, load and with; you may choose the one that fits your case (here, load). You may use the following code:
$order = new Order();
$order->name = "lorem";
//some polymorphic relationship (hasOne)
$order->user()->save(new User());
return $order->load('user');
unknown
d5203
train
You would have to set the session lifetime to the longest duration any role needs, and then, by saving login timestamps in your database, log users with shorter-lived roles out after the amount of time that you want.
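A rough sketch of that idea as Laravel middleware, where the role check, session key, and 30-minute limit are all placeholders rather than anything from the original question:

<?php

namespace App\Http\Middleware;

use Closure;

class EnforceRoleTimeout
{
    public function handle($request, Closure $next)
    {
        $user = $request->user();

        // hasRole() is assumed from your own role implementation.
        if ($user && $user->hasRole('basic')) {
            $loginTime = session('login_time'); // stored when the user logs in
            if ($loginTime && now()->diffInMinutes($loginTime) > 30) {
                auth()->logout();
                return redirect('/login');
            }
        }

        return $next($request);
    }
}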
unknown
d5204
train
They are section headers and footers. You can set them in the table view data source's tableView:titleForHeaderInSection: and tableView:titleForFooterInSection: methods.
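In Swift, a minimal sketch of those two data source methods, assuming a UITableViewController subclass (the class name and title strings are placeholders):

import UIKit

class ItemsTableViewController: UITableViewController {

    // The strings returned here become the section header / footer text.
    override func tableView(_ tableView: UITableView, titleForHeaderInSection section: Int) -> String? {
        return "Header for section \(section)"
    }

    override func tableView(_ tableView: UITableView, titleForFooterInSection section: Int) -> String? {
        return "Footer for section \(section)"
    }
}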
unknown
d5205
train
* As the message clearly says, this is in case the request to allocate memory fails. (Exactly how this might happen is irrelevant; it is possible, so the code should handle it.)
* The author is assuming that NULL==0, which is often true, but not necessarily so, and (as we both seem to think) is a bad assumption to make.
unknown
d5206
train
Give a common class to all these elements, and then make all of them available to ZeroClipboard:
<a id="c101" class="toBeCopied" href="something">
<a id="c102" class="toBeCopied" href="something else">
Then load them like this:
var clip = new ZeroClipboard($(".toBeCopied"));
unknown
d5207
train
If you look at the docs you see that the function passed to runTransaction is a function returning a promise (the result of transaction.get().then()). Since an async function is just a function returning a promise you might as well write db.runTransaction(async transaction => {}) You only need to return something from this function if you want to pass data out of the transaction. For example if you only perform updates you won't return anything. Also note that the update function returns the transaction itself so you can chain them: try { await db.runTransaction(async transaction => { transaction .update( db.collection("col1").doc(id1), dataFor1 ) .update( db.collection("col2").doc(id2), dataFor2 ); }); } catch (err) { throw new Error(`Failed transaction: ${err.message}`); } A: IMPORTANT: As noted by a couple of the users, this solution doesn't use the transaction properly. It just gets the doc using a transaction, but the update runs outside of it. Check alsky's answer. https://stackoverflow.com/a/52452831/683157 Take a look to the documentation, runTransaction must receive the updateFunction function as parameter. (https://firebase.google.com/docs/reference/js/firebase.firestore.Firestore#runTransaction) Try this var docRef = admin.firestore().collection("docs").doc(docId); let doc = await admin.firestore().runTransaction(t => t.get(docRef)); if (!doc.exists) {throw ("doc not found");} var newLikes = doc.data().likes + 1; await doc.ref.update({ likes: newLikes }); A: The above did not work for me and resulted in this error: "[Error: Every document read in a transaction must also be written.]". The below code makes use of async/await and works fine. try{ await db.runTransaction(async transaction => { const doc = await transaction.get(ref); if(!doc.exists){ throw "Document does not exist"; } const newCount = doc.data().count + 1; transaction.update(ref, { count: newCount, }); }) } catch(e){ console.log('transaction failed', e); } A: In my case, the only way I could get to run my transaction was: const firestore = admin.firestore(); const txRes = await firestore.runTransaction(async (tx) => { const docRef = await tx.get( firestore.collection('posts').doc( context.params.postId ) ); if(!docRef.exists) { throw new Error('Error - onWrite: docRef does not exist'); } const totalComments = docRef.data().comments + 1; return tx.update(docRef.ref, { comments: totalComments }, {}); }); I needed to add my 'collection().doc()' to tx.get directly and when calling tx.update, I needed to apply 'docRef.ref', without '.ref' was not working...
unknown
d5208
train
Filter the product() of those subsets:
from itertools import product

for combo in product([1, 2], [1, 2, 3], [2, 3, 4]):
    if len(set(combo)) == 3:
        print(combo)
or as a list comprehension:
[combo for combo in product([1, 2], [1, 2, 3], [2, 3, 4]) if len(set(combo)) == 3]
Output:
>>> from itertools import product
>>> [combo for combo in product([1, 2], [1, 2, 3], [2, 3, 4]) if len(set(combo)) == 3]
[(1, 2, 3), (1, 2, 4), (1, 3, 2), (1, 3, 4), (2, 1, 3), (2, 1, 4), (2, 3, 4)]
unknown
d5209
train
I solved this problem by updating Visual Studio 2012.
unknown
d5210
train
You should be able to BASE64 encode the image, and use the resulting string as the src of the img tag. For example: <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAWQAAAD8CAYAAAB..."/> Also make sure that your content type is set as text/html instead of text/plain. Looking at the mail, it seems that it's set to the latter, hence why images don't render and tags render as text. Add the following to your arguments to use text/html content type: :headers => { 'Content-Type' => 'text/html' }
unknown
d5211
train
DrawerLayout should be the parent of your layout. Here`s my example: <?xml version="1.0" encoding="utf-8"?> <androidx.drawerlayout.widget.DrawerLayout xmlns:android="http://schemas.android.com/apk/res/android" xmlns:app="http://schemas.android.com/apk/res-auto" xmlns:tools="http://schemas.android.com/tools" android:id="@+id/mDrawer" android:layout_width="match_parent" android:layout_height="match_parent" android:fitsSystemWindows="false" tools:openDrawer="end"> <androidx.constraintlayout.widget.ConstraintLayout android:layout_width="match_parent" android:layout_height="match_parent" > //my code </androidx.constraintlayout.widget.ConstraintLayout> <com.google.android.material.navigation.NavigationView android:id="@+id/navrapoarte" android:layout_width="match_parent" android:layout_height="match_parent" android:layout_gravity="start" > //my code <androidx.constraintlayout.widget.ConstraintLayout android:layout_width="match_parent" android:layout_height="match_parent" android:minHeight="200dp" android:orientation="vertical"> </androidx.constraintlayout.widget.ConstraintLayout> </com.google.android.material.navigation.NavigationView> </androidx.drawerlayout.widget.DrawerLayout>
unknown
d5212
train
I guess you can use Reflections for this: double GetHHSum<T>(T x) where T : class { double result = 0; var properties = typeof(T).GetProperties(); foreach (var property in properties) { if (property.Name.StartsWith("HH")) sum += Convert.ToSingle(property.GetValue(x)).GetValueOrDefault(); } return result; } And then use it like this: return (from e in Data where e.SD == date select e).ToList().Select(x => GetHHSum(x)).FirstOrDefault(); Code is not tested A: I might be wrong because I don't know your data, but it seems to me that they are not fully normalized (repetitive attributes). You might consider going to the 3rd form normal - thus create a/some separate table that will contain one value by row - and then to join your 2 tables in your linq query. The link query will look much much better, and you will later be able to change your HH fields without changing your queries. A: One suggestion is to refactor the above code to use LINQ method chains and lambdas (personal preference), then extract the select lambda into a separate method. For instance: // Note select e and .Select(x => x..) is redundant. Only need one var query1 = Data.Where(e => e.SD == date).Select(SumOfHValues); return query1.FirstOrDefault(); // Note types are unclear in your question so I've put dummy placeholders private static QueryResultType SumOfHValues(YourInputClassType x) { // Nothing wrong with this syntactically, it will be faster than a // reflection solution // // Algorithmic code tends to have this sort of look & feel. // You could make it more readable // by commenting exactly what the summation is doing and // with a mathematical notation or link to documentation / web source return x.HH01 + x.HH16 + x.HH17 + x.HH18 + x.HH19 + x.HH20 + x.HH21 + x.HH22 + x.HH23 + x.HH24 + x.HH25 + x.HH26 + x.HH27 + x.HH28 + x.HH29 + x.HH30 + x.HH31 + x.HH32 + x.HH33 + x.HH34 + x.HH35 + x.HH36 + x.HH37 + x.HH38 + x.HH39 + x.HH40 + x.HH41 + x.HH42 + x.HH43 + x.HH44 + x.HH45 + x.HH46 + x.HH47 + x.HH48 + x.HH49.GetValueOrDefault() + x.HH50.GetValueOrDefault() } In addition if you wanted to call GetValueOrDefault() on each HHxx property you could wrap this in a further helper function. this really boils down to code preference. Which do you prefer? Seeing .GetValueOrDefault() on the end of each property access or a function around it? e.g. return x.HH01 + x.HH16 + x.HH17 + x.HH18 becomes return Get(x.HH01) + Get(x.HH16) + Get(x.HH17) + Get(x.HH18) ... private static HClassType Get(HClassType input) { return input.GetValueOrDefault(); } Personally I would just go with ordering my HHxx + HHyy code in columns and calling .GetValueOrDefault() on each one. If it's put in a helper method at least its only written once, even if it is verbose. Best regards,
unknown
d5213
train
Assuming you have some kind of id in the item, you could do something like this. queue.pipe( groupBy((item) => item.id), // split queue values into independent obs based on grouping key mergeMap( // process each group obs in parallel (group$) => group$.pipe( switchMap((item) => this.httpClient.get(item)) //cancel prev req in the group if still running ) ) ) .subscribe(console.log); cheers
unknown
d5214
train
This question was confusing, since it seemed to describe a very unlikely condition. How could a newly-configured CloudFront distribution with a new certificate from ACM offer an invalid certificate? In truth, I was distracted by part of the "helpful" browser error message, "You might be connecting to a server that is pretending to be..." I mistakenly assumed that this implied that the hostname in the certificate was correct ("pretending to be") but that the certificate was invalid for some other reason. As it turns out, the certificate being offered was the default, generic *.cloudfront.net certificate, so the hostname in the cert didn't match the custom domain name. After creating a certificate in ACM, it needs to be associated with the CloudFront distribution, as mentioned at https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesSSLCertificate. Another hint to the nature of the problem would have been observable in the ACM console. There, the certificate would have shown In Use? No. In Use? – Whether the ACM Certificate is actively associated with an AWS service such as Elastic Load Balancing or CloudFront. The value can be No or Yes. https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-list.html#gs-acm-list-console
unknown
d5215
train
You should take a look at the APOC procedures. They are straightforward to install within your Neo4j environment and provide many additional capabilities, e.g. exporting your results or data to a .csv file.
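As an illustrative sketch, exporting the result of a query with APOC can look like the call below; the query, filename, and empty config map are placeholders, and file export may also need to be enabled in the APOC settings:

CALL apoc.export.csv.query(
  "MATCH (p:Person) RETURN p.name AS name, p.age AS age",
  "people.csv",
  {}
)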
unknown
d5216
train
The reason the square bracket [ and ] chars are causing chaos is due to the fact that they represent the basic character class in Regular Expressions. Additionally, express.js allows its routes to be defined with regular expressions in them. So, here is what you are actually telling express to respond to when you say: /xxx/xxx/items[5] it is trying to match the given URL with /xxx/xxx/items5 To fix it, define your route like this: app.get(/^\/api\/analytics\/v1\/deviceCount\/Building\[\d+\]\/today/, function(req, res) {...}); EDIT: I just found out that express allows for parameter validation. So you can try something like this as well... app.param("building_5", /^Building\[\d+\]$/); app.get("/api/analytics/v1/deviceCount/:building_5/today", function(req, res) {...}); A: When you're passing a string to a regex function in JS you need to escape the backslash before the opening bracket, like so router.get('/api/analytics/v1/deviceCount/Building\\[5\]/today', function(req, res) { Related: How can I put [] (square brackets) in RegExp javascript? Also, use this to get the number in the brackets app.get('/api/analytics/v1/deviceCount/Building\\[:n\]/today', function(req, res) { res.send(req.params.n) });
unknown
d5217
train
Take a look at netifaces. It should help. Here is example from their documentation: >>> netifaces.interfaces() ['lo0', 'gif0', 'stf0', 'en0', 'en1', 'fw0'] >>> netifaces.ifaddresses('lo0') {18: [{'addr': ''}], 2: [{'peer': '127.0.0.1', 'netmask': '255.0.0.0', 'addr': '127.0.0.1'}], 30: [{'peer': '::1', 'netmask': 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', 'addr': '::1'}, {'peer': '', 'netmask': 'ffff:ffff:ffff:ffff::', 'addr': 'fe80::1%lo0'}]} And it should work on OS X, Linux and Windows.
unknown
d5218
train
In Angular, there are a couple of ways to do this. If you need to generate HTML in the TypeScript and then interpolate it into the template, you can use a combination of the DomSanitizer and the innerHTML attribute on another element (for example a span or div). Below is an example of what I suggested above:
hello-world.component.ts:
@Component({
  selector: "hello-world",
  templateUrl: "./hello-world.component.html",
  styleUrls: ["./hello-world.component.scss"]
})
export class HelloWorld {
  innerHTML: string = `<p>Hello, world!</p>`;
}
sanitize.pipe.ts:
@Pipe({ name: 'sanitize' })
export class SanitizePipe implements PipeTransform {
  constructor(private sanitizer: DomSanitizer) { }

  transform(value: string): SafeHtml {
    return this.sanitizer.bypassSecurityTrustHtml(value);
  }
}
hello-world.component.html:
<div [innerHTML]="innerHTML | sanitize"></div>
unknown
d5219
train
You are getting an error because System.Console.Clear (along with other methods that attempt to control/query the console such as System.Console.[Get|Set]CursorPosition) requires a console/TTY but none is attached to the program. To run your code as-is, you should be able to use the --tty option to docker run to allocate a pseudo-TTY, e.g. docker run --tty <image>. To modify your code to not require this, you'd probably want to create your own wrapper for System.Console.Clear that wraps it in a try-catch: void ClearConsole() { try { System.Console.Clear(); } catch (System.IO.IOException) { // do nothing } } If only targeting Windows, you can alternatively do a P/Invoke call to GetConsoleWindow to check whether a console exists before calling System.Console.Clear: class Program { [System.Runtime.InteropServices.DllImport("kernel32.dll")] static extern System.IntPtr GetConsoleWindow(); static void ClearConsole() { if (GetConsoleWindow() != System.IntPtr.Zero) { System.Console.Clear(); } } }
unknown
d5220
train
I guess it's because char is 16-bit in Java. So when you increment the key with key[crypt_ptr] += (char) 1 or add two chars with key[crypt_ptr] += key[crypt_ptr + 1], it behaves differently from C (where char is 8-bit). Try to use bytes everywhere instead of chars, and just use the symbol codes for initialization. A: Your key values need to be 8-bit. Try byte[] key = "X02$B:".getBytes(); A: Why don't you show us an example where the differences manifest themselves? BTW, I would write: char[] key = {'X','0','2','$','B',':', '\0'};
unknown
d5221
train
I am thinking that the code should maybe look like this: @bot.event async def on_message(message): message = await bot.wait_for_message(author=message.author) if message.content.startswith('!activate'): global key key = message.content[len('!activate'):].strip() print(key) if message.content.startswith('!genlifetime password'): global amount amount = message.content[len('!genlifetime password'):].strip() num = int(amount) chars = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0'] for x in range(0, num): authkey1 = '' authkey2 = '' authkey3 = '' authkey4 = '' for i in range(0,4): authkey1 = authkey1 + chars[random.randrange(0,35)] for i in range(0,4): authkey2 = authkey2 + chars[random.randrange(0,35)] for i in range(0,4): authkey3 = authkey3 + chars[random.randrange(0,35)] for i in range(0,4): authkey4 = authkey4 + chars[random.randrange(0,35)] authkey = authkey1 + '-' + authkey2 + '-' + authkey3 + '-' + authkey4 print(authkey) with open(keyfile, 'a') as f: f.write(authkey + ' LIFETIME \n') Although, I haven't tested this. A: The wait_for_message calls seem superfluous: according to the docs, the message argument passed into the callbacks is the message you want. If you were to remove the calls to wait_for_message and just use the message passed in directly, it would probably work as expected.
unknown
d5222
train
When moving data from Hot to UltraWarm you incur cost because you have new UltraWarm nodes and S3 storage associated with it. However, this allows you to:
* use less EBS storage
* have fewer data nodes (since some of your queries will now be handled by the UltraWarm nodes)
unknown
d5223
train
Everything should be loaded and initialized just fine, so calling: System.out.println(infosEmail.getEmpresa()); should give expected value. Problem The problem is in the default implementation of toString() method (done via @Data) at EmailCameraOffline class, which does not include inherited fields. Solution To fix this you can "override" @Data's toString() implementation to include inherited fields as well using Lombok as: @Data @ToString(callSuper = true) public class EmailCameraOffline extends QueueContent { ... }
unknown
d5224
train
The solution can be found on codeproject.com in article "Dynamic Table Mapping for LINQ-to-SQL." Below is a static class you can use. Please see the article for instructions on what you must do to use the 4 different generic methods. Here is an invocation example: public interface IResult { [Column(IsPrimaryKey = true)] int Id { get; set; } [Column] string Name { get; set; } [Column] double Value { get; set; } } public void TestThis() { var connectionString = "Data Source=.\SQLEXPRESS;Initial Catalog=YourDatabaseName;Integrated Security=True;Pooling=False"; var context = new DataContext(connectionString); var table = context.GetTable<IResult>("YourTableName"); var query = from r in table where r.Id == 108 select r; var list = query.ToList(); } Class Code: namespace Prototype.NamedTable { using System; using System.Collections; using System.Collections.Generic; using System.Data.Linq; using System.Data.Linq.Mapping; using System.Linq; using System.Linq.Expressions; using System.Reflection; using System.Reflection.Emit; /// <summary> /// The utility. /// </summary> public static class Utility { #region Constants and Fields /// <summary> /// The named types. /// </summary> private static readonly Dictionary<string, Type> NamedTypes = new Dictionary<string, Type>(); /// <summary> /// The _assembly builder. /// </summary> private static AssemblyBuilder _assemblyBuilder; /// <summary> /// The _module builder. /// </summary> private static ModuleBuilder _moduleBuilder; #endregion #region Properties /// <summary> /// Gets or sets a value indicating whether Verbose. /// </summary> public static bool Verbose { get; set; } #endregion #region Public Methods /// <summary> /// The clear. /// </summary> public static void Clear() { _assemblyBuilder = null; NamedTypes.Clear(); } /// <summary> /// Retrieve a table from the data context which implements ITable&lt;TEntity&gt; by T and use ITable&lt;TBack&gt; /// </summary> /// <typeparam name="TEntity"> /// Entity Type /// </typeparam> /// <typeparam name="TBack"> /// Backing Type /// </typeparam> /// <param name="context"> /// Data Context /// </param> /// <returns> /// </returns> public static ATable<TEntity> GetTable<TEntity, TBack>(this DataContext context) where TEntity : class where TBack : class { // Create the backup table Table<TBack> refer = context.GetTable<TBack>(); // Prepare the cloning method Delegate cloneFrom = CompileCloning(typeof(TEntity), typeof(TBack)); // Construct the table wrapper return new ATable<TEntity>(refer, cloneFrom); } /// <summary> /// Retrieve a table from the data context which implements ITable&lt;TEntity&gt; uses specific backing table /// </summary> /// <typeparam name="TEntity"> /// Entity Type /// </typeparam> /// <param name="context"> /// Data context /// </param> /// <param name="name"> /// Table name /// </param> /// <returns> /// </returns> public static ATable<TEntity> GetTable<TEntity>(this DataContext context, string name) where TEntity : class { // Create/Retrieve a type definition for the table using the TEntity type Type type = DefineEntityType(typeof(TEntity), name); // Create the backup table using the new type ITable refer = context.GetTable(type); // Prepare the cloning method Delegate cloneFrom = CompileCloning(typeof(TEntity), type); // Construct the table wrapper return new ATable<TEntity>(refer, cloneFrom); } /* /// <summary> /// The log. /// </summary> /// <param name="format"> /// The format. /// </param> /// <param name="args"> /// The args. 
/// </param> public static void Log(string format, params object[] args) { if (!Verbose) { return; } Console.Write("*** "); if ((args == null) || (args.Length == 0)) { Console.WriteLine(format); } else { Console.WriteLine(format, args); } }*/ #endregion #region Methods /// <summary> /// Clone an attribute /// </summary> /// <param name="attr"> /// </param> /// <returns> /// </returns> private static CustomAttributeBuilder CloneColumn(object attr) { Type source = attr.GetType(); Type target = typeof(ColumnAttribute); var props = new List<PropertyInfo>(); var values = new List<object>(); // Extract properties and their values foreach (PropertyInfo prop in source.GetProperties()) { if (!prop.CanRead || !prop.CanWrite) { continue; } props.Add(target.GetProperty(prop.Name)); values.Add(prop.GetValue(attr, null)); } // Create a new attribute using the properties and values return new CustomAttributeBuilder( target.GetConstructor(Type.EmptyTypes), new object[0], props.ToArray(), values.ToArray()); } /// <summary> /// Make a delegate that copy content from "source" to "dest" /// </summary> /// <param name="source"> /// Source Type /// </param> /// <param name="dest"> /// Destination Type /// </param> /// <returns> /// Executable delegate /// </returns> private static Delegate CompileCloning(Type source, Type dest) { // Input parameter ParameterExpression input = Expression.Parameter(source); // For every property, create a member binding List<MemberBinding> binds = source.GetProperties().Select( prop => Expression.Bind( dest.GetProperty( prop.Name, BindingFlags.Instance | BindingFlags.Public | BindingFlags.DeclaredOnly), Expression.MakeMemberAccess(input, prop))).Cast<MemberBinding>().ToList(); // Expression of creating the new object MemberInitExpression body = Expression.MemberInit( Expression.New(dest.GetConstructor(Type.EmptyTypes)), binds); // The final lambda LambdaExpression lambda = Expression.Lambda(body, input); // MJE //Log("{0}", lambda.ToString()); // Return the executable delegate return lambda.Compile(); } /// <summary> /// Create a class based on the template interface /// </summary> /// <param name="template"> /// </param> /// <param name="name"> /// </param> /// <returns> /// </returns> private static Type DefineEntityType(Type template, string name) { // Prepare the builders if not done if (_assemblyBuilder == null) { _assemblyBuilder = AppDomain.CurrentDomain.DefineDynamicAssembly( new AssemblyName(Guid.NewGuid().ToString()), AssemblyBuilderAccess.Run); _moduleBuilder = _assemblyBuilder.DefineDynamicModule("Types"); } // Check if there is already a type created for that table if (NamedTypes.ContainsKey(name)) { return NamedTypes[name]; } // Create the new type TypeBuilder tbuilder = null; if (template.IsInterface) { tbuilder = DefineInterfaceChild(name, template); } else { tbuilder = DefineOverriddenChild(name, template); } Type final = tbuilder.CreateType(); NamedTypes[name] = final; return final; } /// <summary> /// The define interface child. /// </summary> /// <param name="name"> /// The name. /// </param> /// <param name="template"> /// The template. 
/// </param> /// <returns> /// </returns> private static TypeBuilder DefineInterfaceChild(string name, Type template) { TypeBuilder tbuilder = _moduleBuilder.DefineType( name, TypeAttributes.Public, typeof(Object), new[] { template }); // Default constructor tbuilder.DefineDefaultConstructor(MethodAttributes.Public); // Attach Table attribute var abuilder = new CustomAttributeBuilder( typeof(TableAttribute).GetConstructor(Type.EmptyTypes), new object[0], new[] { typeof(TableAttribute).GetProperty("Name") }, new object[] { name }); tbuilder.SetCustomAttribute(abuilder); List<PropertyInfo> properties = template.GetProperties().ToList(); // May require sorting // Implement all properties)); foreach (PropertyInfo prop in properties) { // Define backing field FieldBuilder fbuilder = tbuilder.DefineField( "_" + prop.Name, prop.PropertyType, FieldAttributes.Private); // Define get method MethodBuilder pgbuilder = tbuilder.DefineMethod( "get_" + prop.Name, MethodAttributes.Public | MethodAttributes.SpecialName | MethodAttributes.HideBySig | MethodAttributes.Virtual | MethodAttributes.Final, prop.PropertyType, Type.EmptyTypes); // Define get method body { return _field; } ILGenerator ilg = pgbuilder.GetILGenerator(); ilg.Emit(OpCodes.Ldarg_0); ilg.Emit(OpCodes.Ldfld, fbuilder); ilg.Emit(OpCodes.Ret); // Define set method MethodBuilder psbuilder = tbuilder.DefineMethod( "set_" + prop.Name, MethodAttributes.Public | MethodAttributes.SpecialName | MethodAttributes.HideBySig | MethodAttributes.Virtual | MethodAttributes.Final, null, new[] { prop.PropertyType }); // Define set method body { _field = value; } ILGenerator ils = psbuilder.GetILGenerator(); ils.Emit(OpCodes.Ldarg_0); ils.Emit(OpCodes.Ldarg_1); ils.Emit(OpCodes.Stfld, fbuilder); ils.Emit(OpCodes.Ret); // Define the property PropertyBuilder pbuilder = tbuilder.DefineProperty( prop.Name, PropertyAttributes.None, CallingConventions.Standard, prop.PropertyType, null); // Set get/set method pbuilder.SetGetMethod(pgbuilder); pbuilder.SetSetMethod(psbuilder); // Attach Column attribute foreach (object attr in prop.GetCustomAttributes(false)) { if (attr is ColumnAttribute || attr is AlterColumnAttribute) { // MJE //Log("Create column attribute for {0}", prop.Name); pbuilder.SetCustomAttribute(CloneColumn(attr)); break; } } } return tbuilder; } /// <summary> /// The define overridden child. /// </summary> /// <param name="name"> /// The name. /// </param> /// <param name="template"> /// The template. 
/// </param> /// <returns> /// </returns> private static TypeBuilder DefineOverriddenChild(string name, Type template) { TypeBuilder tbuilder = _moduleBuilder.DefineType(name, TypeAttributes.Public, template); // Default constructor tbuilder.DefineDefaultConstructor(MethodAttributes.Public); // Attach Table attribute var abuilder = new CustomAttributeBuilder( typeof(TableAttribute).GetConstructor(Type.EmptyTypes), new object[0], new[] { typeof(TableAttribute).GetProperty("Name") }, new object[] { name }); tbuilder.SetCustomAttribute(abuilder); List<PropertyInfo> properties = template.GetProperties().ToList(); // May require sorting // Implement all properties)); foreach (PropertyInfo prop in properties) { // Define get method MethodBuilder pgbuilder = tbuilder.DefineMethod( "get_" + prop.Name, MethodAttributes.Public | MethodAttributes.SpecialName | MethodAttributes.HideBySig | MethodAttributes.Virtual | MethodAttributes.Final, prop.PropertyType, Type.EmptyTypes); // Define get method body { return _field; } ILGenerator ilg = pgbuilder.GetILGenerator(); ilg.Emit(OpCodes.Ldarg_0); ilg.Emit(OpCodes.Call, template.GetMethod("get_" + prop.Name)); ilg.Emit(OpCodes.Ret); // Define set method MethodBuilder psbuilder = tbuilder.DefineMethod( "set_" + prop.Name, MethodAttributes.Public | MethodAttributes.SpecialName | MethodAttributes.HideBySig | MethodAttributes.Virtual | MethodAttributes.Final, null, new[] { prop.PropertyType }); // Define set method body { _field = value; } ILGenerator ils = psbuilder.GetILGenerator(); ils.Emit(OpCodes.Ldarg_0); ils.Emit(OpCodes.Ldarg_1); ils.Emit(OpCodes.Call, template.GetMethod("set_" + prop.Name)); ils.Emit(OpCodes.Ret); // Define the property PropertyBuilder pbuilder = tbuilder.DefineProperty( prop.Name, PropertyAttributes.None, CallingConventions.Standard, prop.PropertyType, null); // Set get/set method pbuilder.SetGetMethod(pgbuilder); pbuilder.SetSetMethod(psbuilder); // Attach Column attribute foreach (object attr in prop.GetCustomAttributes(false)) { if (attr is ColumnAttribute || attr is AlterColumnAttribute) { // MJE //Log("Create column attribute for {0}", prop.Name); pbuilder.SetCustomAttribute(CloneColumn(attr)); break; } } } return tbuilder; } #endregion /// <summary> /// A table wrapper implements ITable&lt;TEntity&gt; backed by other ITable object /// </summary> /// <typeparam name="TEntity"> /// </typeparam> public class ATable<TEntity> : ITable<TEntity> where TEntity : class { #region Constants and Fields /// <summary> /// Cloning method /// </summary> private readonly Delegate _clone; /// <summary> /// Backing table /// </summary> private readonly ITable _internal; #endregion #region Constructors and Destructors /// <summary> /// Initializes a new instance of the <see cref="ATable{TEntity}"/> class. /// Construct from backing table /// </summary> /// <param name="inter"> /// </param> /// <param name="from"> /// </param> public ATable(ITable inter, Delegate from) { this._internal = inter; this._clone = from; } #endregion #region Properties /// <summary> /// Gets ElementType. /// </summary> public Type ElementType { get { // Use the backing table element return this._internal.ElementType; } } /// <summary> /// Gets Expression. /// </summary> public Expression Expression { get { // Use the backing table expression return this._internal.Expression; } } /// <summary> /// Gets Provider. 
/// </summary> public IQueryProvider Provider { get { // Use the backing table provider return this._internal.Provider; } } #endregion #region Implemented Interfaces #region IEnumerable /// <summary> /// The get enumerator. /// </summary> /// <returns> /// </returns> /// <exception cref="NotImplementedException"> /// </exception> IEnumerator IEnumerable.GetEnumerator() { throw new NotImplementedException(); } #endregion #region IEnumerable<TEntity> /// <summary> /// The get enumerator. /// </summary> /// <returns> /// </returns> /// <exception cref="NotImplementedException"> /// </exception> public IEnumerator<TEntity> GetEnumerator() { throw new NotImplementedException(); } #endregion #region ITable<TEntity> /// <summary> /// The attach. /// </summary> /// <param name="entity"> /// The entity. /// </param> /// <exception cref="NotImplementedException"> /// </exception> public void Attach(TEntity entity) { throw new NotImplementedException(); } /// <summary> /// The delete on submit. /// </summary> /// <param name="entity"> /// The entity. /// </param> public void DeleteOnSubmit(TEntity entity) { // Directly invoke the backing table this._internal.DeleteOnSubmit(entity); } /// <summary> /// The insert on submit. /// </summary> /// <param name="entity"> /// The entity. /// </param> public void InsertOnSubmit(TEntity entity) { // Input entity must be changed to backing type object v = this._clone.DynamicInvoke(entity); // Invoke the backing table this._internal.InsertOnSubmit(v); } #endregion #endregion } /// <summary> /// The alter column attribute. /// </summary> public class AlterColumnAttribute : Attribute { #region Constants and Fields /// <summary> /// The _can be null. /// </summary> private bool _canBeNull = true; /// <summary> /// The _update check. /// </summary> private UpdateCheck _updateCheck = UpdateCheck.Always; #endregion #region Properties /// <summary> /// Gets or sets AutoSync. /// </summary> public AutoSync AutoSync { get; set; } /// <summary> /// Gets or sets a value indicating whether CanBeNull. /// </summary> public bool CanBeNull { get { return this._canBeNull; } set { this._canBeNull = value; } } /// <summary> /// Gets or sets DbType. /// </summary> public string DbType { get; set; } /// <summary> /// Gets or sets Expression. /// </summary> public string Expression { get; set; } /// <summary> /// Gets or sets a value indicating whether IsDbGenerated. /// </summary> public bool IsDbGenerated { get; set; } /// <summary> /// Gets or sets a value indicating whether IsDiscriminator. /// </summary> public bool IsDiscriminator { get; set; } /// <summary> /// Gets or sets a value indicating whether IsPrimaryKey. /// </summary> public bool IsPrimaryKey { get; set; } /// <summary> /// Gets or sets a value indicating whether IsVersion. /// </summary> public bool IsVersion { get; set; } /// <summary> /// Gets or sets UpdateCheck. /// </summary> public UpdateCheck UpdateCheck { get { return this._updateCheck; } set { this._updateCheck = value; } } #endregion } } } A: Linq-to-Sql cannot materialize interfaces. It needs a class specification to know what instances it should create from a query. The exception message is elusive, to say the least. I don't know why it isn't more to the point. Note that the class you want to materialize must have been mapped, or: it must be in the dbml. I say this because your ITIPO class is not partial, which makes me wonder how you can make it implement an interface (well, maybe you just slimmed down the code). 
Side note: don't use all capitals for class names, and prefix interface names with "I", not class names.
unknown
d5225
train
You should use Selenium in this case which will open the page in a browser and then you can handle the click event of navigator button and access the refreshed DOM each time. Here is a simple code for your reference: from selenium import webdriver browser = webdriver.Firefox() browser.get("http://www.google.com") browser.find_element_by_id("lst-ib").send_keys("book") browser.find_element_by_name("btnK").click() A: The page url you shared, shows that the page-numbers can be accessed through the following hyperlink-tags: * *Current Page: <a class="pager currentpage"> *Other Pages (each): <a class="pager"> You can access the relevant information as follows. The second line will give you a list of all the pages. Extract the "href" attribute from them. When you click on the button, a javascript fires and most likely appends a part of the url to open the new page. soup.findall('a', _class="pager currentpage") soup.findall('a', _class="pager") This is the text of one of the buttons. You will need to study the page source further to figure out what url is needed. <a class="pager currentpage" href="javascript:WebForm_DoPostBackWithOptions(new WebForm_PostBackOptions( &quot;ctl00$ContentPlaceHolder1$gvItem$ctl01$ctl03&quot;, &quot;&quot;, true, &quot;&quot;, &quot;&quot;, false, true))" style="display:inline-block;width:27px;">1</a> function __doPostBack(eventTarget, eventArgument) { if (!theForm.onsubmit || (theForm.onsubmit() != false)) { theForm.__EVENTTARGET.value = eventTarget; theForm.__EVENTARGUMENT.value = eventArgument; theForm.submit(); } } A better Option Use selenium browser automation to execute the clicks on such javascript-wrapped buttons. A: I was able to achieve without selenium. Although code is not complete but we can scrape all the pages by looping __EVENTTARGET from bs4 import BeautifulSoup import requests,json def returnJson(wordmark): url = "https://classicalnumismaticgallery.com/advancesearch.aspx?auctioncode=0&pricerange=0&keyword=indore&category=&material=0&lotno=&endlotno=" r_init = requests.get(url) soup = BeautifulSoup(r_init.text, 'html.parser') event_validation = soup.find("input", attrs={"name" : "__EVENTVALIDATION"})['value'] view_state = soup.find("input", attrs={"name" : "__VIEWSTATE"})['value'] pages=4 event_target = 'ctl00$ContentPlaceHolder1$gvItem$ctl01$ctl{:>02d}'.format(pages) postdata = { 'ctl00$ContentPlaceHolder1$DDLFilter' : '0', '__EVENTVALIDATION' : event_validation, '__EVENTTARGET' : event_target, "__VIEWSTATE" : view_state, } r = requests.post(url, data=postdata) return r def scraping(r): description='' soup = BeautifulSoup(r.text, 'html.parser') desc = soup.find_all('div' , {'class' : 'product_contain'}) for d in desc: print(d) scraping(returnJson('indore'))
unknown
d5226
train
You have two errors:
* trying to attach series to data; it should be: series: series
* wrong format for the points; it should be: { low: from, high: to, x: x }
See fixed demo: http://jsfiddle.net/a7rmx/45/
unknown
d5227
train
Use the option method to change the source: var source = $(".selector").autocomplete("option", "source", "/New/Source");
unknown
d5228
train
The easiest way to convert between the two is to convert the .NET time to the number of milliseconds since the UNIX epoch:
public static long ToEpochDate(this DateTime dt)
{
    var epoch = new DateTime(1970, 1, 1);
    return (long)dt.Subtract(epoch).TotalMilliseconds;
}
You can then use that to generate your JS string:
DateTime current = DateTime.Now;
var jsDate = string.Format("new Date({0})", current.ToEpochDate());
unknown
d5229
train
I used the Revit Lookup tool and browsed through the database to find a class called StartingViewSettings with the property ViewId that will get me the ElementId of the starting view. My actual code for getting the view is below FilteredElementCollector startingViewSettingsCollector = new FilteredElementCollector(document); startingViewSettingsCollector.OfClass(typeof(StartingViewSettings)); View startingView = null; foreach(StartingViewSettings settings in startingViewSettingsCollector) { startingView = (View)document.GetElement(settings.ViewId); }
unknown
d5230
train
It sounds like the workqueue interface might be what you're after - or, for something lighter-weight, a kfifo combined with a rwsem semaphore. A: I would strongly advise against keeping the VxWorks architecture on Linux. Kernel thread proliferation is frowned upon, and your code will never make it into the official kernel tree. Even if you don't care about that, are you 100% sure that you want to develop a driver in a non-standard way? Things would be much simpler if you just got rid of these two tasks. BTW, why on earth do you need tasks for a PCI driver to begin with?
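For reference, a minimal sketch of the workqueue approach mentioned above; the function and variable names are placeholders, not anything from the original driver:

#include <linux/module.h>
#include <linux/workqueue.h>

/* Runs later in process context, outside interrupt context. */
static void event_work_fn(struct work_struct *work)
{
        pr_info("deferred work: process the queued event here\n");
}

static DECLARE_WORK(event_work, event_work_fn);

/* From the PCI interrupt handler (or wherever the event arrives),
 * hand the work off to the kernel-managed worker threads:
 *
 *         schedule_work(&event_work);
 *
 * flush_work(&event_work) or cancel_work_sync(&event_work) should be
 * called before the module is unloaded.
 */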
unknown
d5231
train
The lightest weight approach to this would not be to create or use classes for your data. You can instead use plain JavaScript objects, and just describe their types strongly enough for your use cases. So instead of a Data class, you can have an interface, and instead of using instances of the Map class with string-valued keys, you can just use a plain object with a string index signature to represent the type of data you already have: interface Data { name: string; config: string; entries: { [k: string]: number } } To make a valid Data, you don't need to use new anywhere; just make an object literal with name, config, and entries properties of the right types. The entries property is { [k: string]: number }, which means that you don't know or care what the keys are (other than the fact that they are strings as opposed to symbols), but the property values at those keys should be numbers. Armed with that definition, let's convert data.datas to Data[] in a way that meets your three criteria: const datas: Data[] = data.datas.map(d => ({ config: "origin", // any default values you want ...d, // the object entries: onlyNumberValues(d.entries ?? {}) // filter out non-numeric entries })); function onlyNumberValues(x: { [k: string]: unknown }): { [k: string]: number } { return Object.fromEntries( Object.entries(x).filter( (kv): kv is [string, number] => typeof kv[1] === "number" ) ); } * *The above sets the entries property to be a filtered version of the entries property in the incoming data, if it exists. (If entries does not exist, we use an empty object {}). The filter function is onlyNumberValues(), which breaks the object into its entries via the Object.entries() method, filters these entries with a user-defined type guard function, and packages them back into an object via the Object.fromEntries() method. The details of this function's implementation can be changed, but the idea is that you perform whatever validation/transformation you need here. *Any required property that may be absent in the JSON file should be given a default value. We do this by creating an object literal that starts with these default properties, after which we spread in the properties from the JSON object. We do this with the config property above. If the JSON object has a config property, it will overwrite the default when spread in. (At the very end we add in the entries property explicitly, to overwrite the value in the object with the filtered version). *Because we've spread the JSON object in, any properties added to the JSON object will automatically be added. Just remember to specify any defaults for these new properties, if they are required. Let's make sure this works as desired: console.log(datas) /* [{ "config": "origin", "name": "test1", "entries": { "red": 1, "green": 2 } }, { "config": "remote", "name": "test2", "entries": { "red": 1, "blue": 3 } }, { "config": "origin", "name": "test3", "entries": { "red": 1, "blue": 3, "purple": 3 } }] */ Looks good. Playground link to code
unknown
d5232
train
Aaaargh! It was the offset! When I remove it, the script processes all the records as intended. The first iteration of the loop processed the first 100 records, removing them from the set of records that didn't have the meta value. This left 710. But the next iteration started from offset 100, which meant the first 100 of those 710 weren't processed. I didn't need an offset – I just needed to select the first batch of remaining records each time. The corrected script is class Mark_Comments_Reviewed { public function __invoke() { $limit = 100; while ( $comment_ids = $this->get_unreviewed_comments( $limit ) ) { array_walk( $comment_ids, function ( $comment_id ) { $comment_id = (int) $comment_id; $this->process_comment( $comment_id ); } ); } } protected function get_unreviewed_comments( int $limit ): array { global $wpdb; return $wpdb->get_col( $wpdb->prepare( "SELECT DISTINCT comments.comment_ID FROM wp_comments AS comments AND comments.comment_ID NOT IN ( SELECT meta.comment_id FROM wp_commentmeta AS meta WHERE meta.meta_key = 'reviewed' ) LIMIT %d" $limit ) ); } protected function process_comment( int $comment_id ): bool { return ! ! update_comment_meta( $comment_id, 'reviewed', (int) true ); } }
unknown
d5233
train
enumerate() has MANY problems: * *you are not using strcpy() and strcat() correctly, so you are trashing memory. You are not allocating enough memory to hold the result of strcpy(), which copies characters until it reaches a null terminator. You are allocating memory for 2 fewer characters than needed (the last char in the path, and the null terminator). You should be allocating strlen+1 characters instead of strlen-1 characters. And worse, you are using strcat() to concatenate a filename onto the allocated string without first reallocating the string to make room for the filename. *you are leaking the allocated string, as you never call delete[] for it. *the if inside the loop is missing != 0 when checking strcmp(".."). *you are pushing pointers into queue to data that is local to enumerate() and gets overwritten on each loop iteration, and goes out of scope when enumerate() exits. Your threads are expecting pointers to data that are stable and do not disappear behind their backs. This is the root of your garbage output. Consider yourself lucky that your code is simply outputting garbage and not just crashing outright. *you are not testing the data.dwFileAttributes field correctly. You need to use the & (bitwise AND) operator instead of the == (equals) operator. Folders and files can have multiple attributes, but you are only interested in checking for one, so you have to test that specific bit by itself and ignore the rest. You really should be using std::string instead for string management, and let it handle memory allocations for you. Also, consider using std::filesystem or boost::filesystem to handle the enumeration. Also, there is no need to push "DONE" strings into the queue after enumerating. When a thread is signaled and goes to extract a string and sees the queue is empty, just exit the thread. Try something more like this instead: #include <windows.h> #include <iostream> #include <string> #include <queue> #include <thread> #include <mutex> #include <conditional_variable> std::queue<std::string> paths; std::mutex mtx; std::conditional_variable cv; bool done = false; void enumerate(const std::string &path) { std::string searchPath = path; if ((!searchPath.empty()) && (searchPath[searchPath.length()-1] != '\\')) searchPath += '\\'; WIN32_FIND_DATA data; HANDLE hFind = FindFirstFileA((searchPath + "*").c_str(), &data); if (hFind != INVALID_HANDLE_VALUE) { do { if ((strcmp(data.cFileName, ".") != 0) && (strcmp(data.cFileName, "..") != 0)) { string fullpath = searchPath + data.cFileName; { std::lock_guard<std::mutex> lock(mtx); paths.push(fullpath); cv.notify_one(); } if (data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) enumerate(fullpath); } } while (FindNextFileA(hFind, &data)); FindClose(hFind); } } void print_queue() { std::unique_lock<std::mutex> lock(mtx); while (true) { cv.wait(lock, [](){ return (!paths.empty()) || done; }); if (paths.empty()) return; std::string rez = paths.front(); paths.pop(); std::cout << rez << std::endl; } } int main() { std::thread thread1(print_queue); std::thread thread2(print_queue); std::thread thread3(print_queue); std::thread thread4(print_queue); enumerate("C:\\"); done = true; cv.notify_all(); thread1.join(); thread2.join(); thread3.join(); thread4.join(); return 0; } A: You nowhere have written which kind of queue you use, but I guess it's a queue<char*>. This means it stores only pointers to memory which is owned somewhere else. 
When you now do queue.push(data.cFileName); you write a pointer into the queue which is no longer valid after the next iteration, since data changes there. After enumerate exits, the data pointers (and thereby the queue elements) will even point to undefined memory, which would explain the output. To fix this, store copies of the file names inside the queue, e.g. by using a queue<std::string>.
unknown
d5234
train
It looks like Git::SVNReplay might fit the bill. A: One approach might be to push your Git repository up to a private repo at GitHub, where you can use Git and everybody else can use Subversion to access the same repository. A: Maybe Pushing an existing git repository to SVN solves your problem. Use svn switch --relocate file:///tmp/repos file:///tmp/newlocation . to connect existing checkouts to the new svn repository.
unknown
d5235
train
Yes, you can do that. Here is a sample demo:
let port = 10840;

angular.module("app", [])
  .value('version', '0.1')
  .constant('configuration', { webroot: 'http://127.0.0.1:' + port })
  .controller("ctrl", function($scope, configuration) {
    console.log(configuration.webroot)
  })
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.0.1/angular.min.js"></script>
<div ng-app="app" ng-controller="ctrl">
</div>
unknown
d5236
train
http://cocoawithlove.com/2009/11/writing-parser-using-nsscanner-csv.html Here is a good place to start for creating a CSV parser. It's complete with sample code and user comments.
unknown
d5237
train
Are you using antivirus software (e.g. Avast) and is it inspecting your HTTPS traffic? It does this by acting like a MITM so you connect it it and it connects to the real website. And if they only support http/1 (which as far as I know they only do) then that would explain this. Though oddly not for for Medium unless you have an exception for this. Should be easy enough to check by looking at the HTTPS cert when visiting the site to see if it was "issued" by your local Avast server. If not that then suggest you look at your ciphers as HTTP/2 is picky about which ones it uses. Anything weird showing on https://www.ssllabs.com/servertest for your site? What cipher is it using for Chrome?
unknown
d5238
train
My mistake was that I was using @RequestScoped instead of @ViewScoped in my bean.
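For illustration, a minimal sketch of a view-scoped bean; the class name and field are placeholders, and this assumes the CDI-compatible javax.faces.view.ViewScoped annotation:

import java.io.Serializable;

import javax.faces.view.ViewScoped;
import javax.inject.Named;

@Named
@ViewScoped
public class SearchBean implements Serializable {

    // State kept here survives AJAX postbacks on the same view,
    // unlike a request-scoped bean, which is recreated on every request.
    private String query;

    public String getQuery() { return query; }
    public void setQuery(String query) { this.query = query; }
}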
unknown
d5239
train
let f = (fun v -> v) in ((f 3), (f true)) B: let f = (fun v -> v) in ((fun g -> let f = g in f) f) C: let f = (fun v -> v) in ((fun g -> let f = g in ((f 3), (f true))) f) For A and B, there is no problem. But for C, OCaml reports error: Error: This expression has type bool but an expression was expected of type int So for A, when evaluating ((f 3), (f true)), f's type is 'a -> 'a, for B, when evaluating let f = g in f, f's type is 'a -> 'a. But for C, when evaluating ((f 3), (f true)), f's type is int -> int. Why C's f doesn't have type 'a -> 'a? I have difficulty in understanding the implementation of OCaml's let polymorphism, I'll appreciate it a lot if anyone can give a concise description of it with respect to the question. A: Your code is unnecessarily confusing because you're using the same name f for two different things in B and also two different things in C. Inside C you have this function: fun g -> let f = g in (f 3, f true) Again this is unnecessarily complicated; it's the same as: fun g -> (g 3, g true) The reason this isn't allowed is that it only works if g is a polymorphic function. This requires rank 2 polymorphism, i.e., it requires the ability to define function parameters that are polymorphic. I'm not exactly sure what you're trying to do, but you can have a record type whose field is a polymorphic function. You can then use this record type to define something like your function: # type r = { f : 'a . 'a -> 'a };; type r = { f : 'a. 'a -> 'a; } # (fun { f = g } -> (g 3, g true)) { f = fun x -> x };; - : int * bool = (3, true) # let myfun { f = g } = (g 3, g true);; val myfun : r -> int * bool = <fun> # myfun { f = fun x -> x };; - : int * bool = (3, true) The downside is that you need to pack and unpack your polymorphic function. As a side comment, your example doesn't seem very compelling, because the number of functions of type 'a -> 'a is quite limited.
unknown
d5240
train
Managed to do it. If anyone else is interested I used the following function: function date_compare($a, $b) { $t1 = strtotime($a->fields[3]); $t2 = strtotime($b->fields[3]); return $t1 - $t2; } $data = $params['data']; usort($data, 'date_compare'); $smarty->assign('sorted', $data); A: Try converting the strings to timestamps first: function do_sort($a, $b) { $aval = strtotime($a); $bval = strtotime($b); if ($aval == $bval) { return 0; } return $aval < $bval ? -1 : 1; }
unknown
d5241
train
Your code is not being sorted due to the way you are making your call. Here is what is happening at the moment: $agenda = Agenda::all() Load every agenda in the database ->take(3) From all those agendas I loaded, take the first three. ->sortBy('date'); Sort only those three by date. To achieve what you appear to want, judging by your request, you would call $agenda = Agenda::where('date', '>=', $the_date)->orderBy('date', 'asc')->take(3)->get(); Where $the_date is the date you want to be the minimum. Typically, you would use a date function from the Carbon library to do this: $the_date = \Carbon\Carbon::now(); This query is forcing the work to be done in database, with the call to get finally retrieving the results. In order, we are telling the database to: * *Filter all the agendas to only those with a date greater than the one we pass in *Order the filtered set by the date, starting from the earliest *Take the first three from that filtered set *Return that set to the $agendas variable.
unknown
d5242
train
You can change your SQL and be more explicit about which fields you're inserting, and leave id out of the list: insert into asset_histories (date) select datapoint2 as `date` ...etc Here's a long real example: jim=# create table test1 (id serial not null, date date not null, name text not null); NOTICE: CREATE TABLE will create implicit sequence "test1_id_seq" for serial column "test1.id" CREATE TABLE jim=# create table test2 (id serial not null, date date not null, name text not null); NOTICE: CREATE TABLE will create implicit sequence "test2_id_seq" for serial column "test2.id" CREATE TABLE jim=# insert into test1 (date, name) values (now(), 'jim'); INSERT 0 1 jim=# insert into test1 (date, name) values (now(), 'joe'); INSERT 0 1 jim=# insert into test1 (date, name) values (now(), 'bob'); INSERT 0 1 jim=# select * from test1; id | date | name ----+------------+------ 1 | 2013-03-14 | jim 2 | 2013-03-14 | joe 3 | 2013-03-14 | bob (3 rows) jim=# insert into test2 (date, name) select date, name from test1 where name <> 'jim'; INSERT 0 2 jim=# select * from test2; id | date | name ----+------------+------ 1 | 2013-03-14 | joe 2 | 2013-03-14 | bob (2 rows) As you can see, only the selected rows were inserted, and they were assigned new id values in table test2. You'll have to be explicit about all the fields you want to insert, and ensure that the ordering of the insert and the select match. Having said all that, you might want to look into the activerecord-import gem, which makes this sort of thing a lot more Railsy. Assuming you have a bunch of new AssetHistory objects (not persisted yet), you could insert them all with: asset_histories = [] asset_histories << AssetHistory.new date: some_date asset_histories << AssetHistory.new date: some_other_date AssetHistory.import asset_histories That will generate a single efficient insert into the table, and handle the id for you. You'll still need to query some data and construct the objects, which may not be faster than doing it all with raw SQL, but may be a better alternative if you've already got the data in Ruby objects.
unknown
d5243
train
It seems the problem was that the lucyapp user did not have sufficient privileges to create the table. I basically had to ensure that the \dn+ command produced this result:
lucy=# \dn+
                         List of schemas
  Name  |  Owner   |  Access privileges   |      Description
--------+----------+----------------------+------------------------
 public | postgres | postgres=UC/postgres+| standard public schema
        |          | =UC/postgres        +|
        |          | lucyapp=UC/postgres  |
(1 row)
where lucyapp has both USAGE (U) and CREATE (C) privileges. Following https://www.postgresql.org/docs/9.0/static/sql-grant.html, this can be achieved with the commands
GRANT USAGE ON SCHEMA public TO lucyapp;
GRANT CREATE ON SCHEMA public TO lucyapp;
I also made lucyapp a superuser prior to running these commands, although that is not recommended for production.
unknown
d5244
train
The put() call is asynchronous. If you want to get the URL after the file is uploaded you have to do it like this:
firebase.storage().ref().child(`${imageFolder}/profile.jpg`).put(file).then((snapshot) => {
  storageRef = snapshot.downloadURL;
  console.log(snapshot.downloadURL);
});
unknown
d5245
train
Appending the following, for example for traffic overlay; &layer=t For other overlays just use the link button in the top right of the bottom left hand pane after selecting the layer you want to see which parameters need to be added to the URL to show the given overlay(s)
unknown
d5246
train
Take a look on the Ext.data.model's constructor. http://docs.sencha.com/extjs/4.2.3/#!/api/Ext.data.Model-method-constructor You can pass your data into it and it will map it to your model's fields. So you can do something like: var model = new Ext.data.model(Ext.decode(<yourJsonString>)); Ext.data.model can be replaced with your model class.
unknown
d5247
train
Got the solution. I had missed this code in my manifest file:
<uses-library android:name="com.google.android.maps"/>

A: Class not found exception in MainActivity. I think you have the wrong package, or your APK doesn't have what you think it has. This is your package name: com.example.airlife
Make sure it is declared in all activities as:
package com.example.airlife;
and don't forget to add an entry in the manifest file for every new activity. If so, then make this change in the manifest file. No need to mention your package name, and remove allowBackup:
<activity
    android:name=".MainActivity"
    android:label="@string/app_name" >
    <intent-filter>
        <action android:name="android.intent.action.MAIN" />
        <category android:name="android.intent.category.LAUNCHER" />
    </intent-filter>
</activity>

A: If not this, try adding any library files you are using. Missing library files may also cause this exception. See this link: java.lang.RuntimeException: Unable to instantiate activity ComponentInfo after SDK update
unknown
d5248
train
Since you're storing hospitaltimer as seconds from the unix epoch, substracting both strtotime figures would then be converted to a date that's, for example, 1680 seconds after the unix epoch. Not what you're looking for. I'd suggest approaching this by storing the "exit time" in yyyy-mm-dd format a.- You'd need to alter your table and convert hospitaltimer to a DATETIME field. that's pretty straight-forward b.- change your code to store the hospitaltimer value as a yyyy-mm-dd hh:mm:ss like this: $current_time = date('Y-m-d H:i:s', strtotime('now') + 1800); // This variable goes to the database and then change the logic to show the remaining time: $timer = date_create($result[0]['hospitaltimer']); // the "exit time" you inserted $now = date_create(date('Y-m-d H:i:s')); $diff = date_diff($timer,$now)->format('%r%i minutes %s seconds'); if ($diff < 0) { echo "out"; } else { echo $diff." before being able to leave"; } Alternatively, substracting the output of different strtotime() operations would be a non-standard way to calculate dates which you could work around by: $timer = $timer - strtotime('now'); // now the timer reads 1680 per your example $seconds = $timer % 60; // use modulo 60 to calculate seconds remaining after dividing seconds by 60 and retrieving the integer part of the result $minutes = ($timer - $seconds) / 60; // from full timer, substract the seconds and the remaining amount, divided by 60 is converted to minutes Both of these alternatives would work, but the benefit of storing the hospitaltimer as a timestamp is that at any given point you could easily query the table to find out how many users are still hospitalized (for example) by simply using a where hospitaltimer > now()
unknown
d5249
train
Most Excel worksheet formulas are not case sensitive, so you don't need UPPER(). Your original formula has a logic error. It can only return TRUE if A is NOT blank. But in the TRUE part, you have another IF statement that is only TRUE if A6 is blank. That situation never happens. How complicated the formula will be depends on what the logic is. If the result should always be one thing for "n" or a blank input, and another thing for a "y" input, maybe you only need to test for "y" instead of testing for "" and "n". =if(and(A1="y",B1="y"),X&" - "&Y,X) If that does not answer your question, please edit your question, provide a data sample and the desired result and explain the logic. Then post a comment, so people get alerted about the update.
unknown
d5250
train
Two things: * *The last element in tableList says bundlestream=..., while expectedList just says stream=.... Something is clearly different. Edit: It appears the OP edited out this change; so it must have been a typo, which leaves: *Are you sure the objects stored in the list implement equals() properly (or, in this case, the keys and values in the HashMaps in the lists)? If those objects' equals() are not returning expected results, then neither will the ArrayList's equals(). Temporary Edit: @OP: Can you run the following code: public static <T,U> void dumpList (List<HashMap<T,U>> list) { System.out.println("List:"); for (HashMap<T,U> map:list) for (Map.Entry<T,U> e:map.entrySet()) System.out.println(e.getKey().getClass() + ", " + e.getValue().getClass()); } On both of your lists, e.g.: dumpList(tableList); dumpList(expectedList); And post the output that it prints in your question? A: Data is different stream=A vs bundlestream=A A: Your tableList has bundlestream=A A: The problem is that the last object of data is different in them. stream=A and bundlestream=A A: equals() for ArrayList is inherited from AbstractList: public boolean equals(Object o) Compares the specified object with this list for equality. Returns true if and only if the specified object is also a list, both lists have the same size, and all corresponding pairs of elements in the two lists are equal. (Two elements e1 and e2 are equal if (e1==null ? e2==null : e1.equals(e2)).) In other words, two lists are defined to be equal if they contain the same elements in the same order. So, perhaps you have not overridden .equals() for at least one of the objects in your List. A: First you have to understand how equals works in ArrayList. And also you stored HashMap with in Array. This is how equals method works in ArrayList public boolean equals(Object paramObject) { if (paramObject == this) return true; if (!(paramObject instanceof List)) { return false; } ListIterator localListIterator1 = listIterator(); ListIterator localListIterator2 = ((List) paramObject).listIterator(); while ((localListIterator1.hasNext()) && (localListIterator2.hasNext())) { Object localObject1 = localListIterator1.next(); Object localObject2 = localListIterator2.next(); if (localObject1 == null) if (localObject2 != null) ; else if (!(localObject1.equals(localObject2))) return false; } return ((!(localListIterator1.hasNext())) && (!(localListIterator2.hasNext()))); } Yes, ArrayList internally compare each and evertObject in List, the compare by means of again, equals method of element in ArrayList. In your case the element is HashMap, so U also need to know how equals method works in HashMap. http://javarevisited.blogspot.in/2011/02/how-hashmap-works-in-java.html So, the sort and quick solution is that just overide equals method of your own. And dont forget to implement hasCode method also. How to return index of ArrayList<Field> Sorry for the long Answer.
unknown
d5251
train
The code is incorrect because PDF files do not embed full PNG images (as opposed to JPEG). Images with the FlateDecode filter include only raw image data that has been compressed with the Flate method. You have to decompress the data to get the raw image data, convert it to RGB (based on the colorspace defined on the PDF image object), and then, using the other properties defined on the PDF image object (Width, Height, etc.), you can construct a PNG image.
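A rough Python sketch of that pipeline, assuming the image uses /DeviceRGB at 8 bits per component with no predictor (a /DecodeParms predictor would have to be undone first); flate_data, width and height stand for the stream bytes and the /Width and /Height values read from the image dictionary:
import zlib
from PIL import Image

# flate_data: the raw stream bytes of the image object (/Filter /FlateDecode)
raw = zlib.decompress(flate_data)

# For /DeviceRGB at 8 bits per component the decompressed data is packed RGB triples
img = Image.frombytes("RGB", (width, height), raw)
img.save("extracted.png")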
unknown
d5252
train
Try this:
public static int score2 {
    get { return GameObject.FindWithTag("Player").GetComponent<gameScript>().score; }
}

A: You have a lot of possibilities. The first one is to make your score a static field on your gameScript.
 *So you can access it anywhere just like this: int myScore = gameScript.Score;
 *And the declaration should be: public static int score;
The second possibility is far better if you want to save a lot of different values from different scripts. In this case, you need to define a gameContext singleton. If you don't know what this is, you should take a look at singletons in C#: https://msdn.microsoft.com/en-us/library/ff650316.aspx A singleton will allow you to have a single instance of your gameContext. In your case, your singleton will have a Score attribute, and you will be able to get the value from any scene and any script. This is the best way so far.

A: score2 is read once at start and then never again. int is an integral type in C# and thus passed by value, i.e. it receives a copy. There are several ways to solve this problem. The easiest solution is to access gameScript.score directly - it provides read/write access to everyone anyway. To encapsulate it you may choose to define a property. A better way could be to define a new class GameStatus which holds all relevant things. This can be implemented as a singleton, for example.
unknown
d5253
train
You could reference this tutorial: AzureAD/azure-activedirectory-library-for-python: Connect to Azure SQL Database. It is doable to connect to Azure SQL Database by obtaining a token from Azure Active Directory (AAD), via ADAL Python. We do not currently maintain a full sample for it, but this essay outlines some key ingredients. * *You follow the instruction of Connecting using Access Token to provision your application. There is another similar blog post here. *Your SQL admin need to add permissions for the app-registration to the specific database that you are trying to access. See details in this blog post Token-based authentication support for Azure SQL DB using Azure AD auth by Mirek H Sztajno. *It was not particularly highlighted in either of the documents above, but you need to use https://database.windows.net/ as the resource string. Note that you need to keep the trailing slash, otherwise the token issued would not work. *Feed the configuration above into ADAL Python's Client Credentials sample. *Once you get the access token, use it in this way in pyodbc to connect to SQL Database. This works with AAD access tokens. Example code to expand the token and prepend the length as described on the page linked above, in Python 2.x: token = "eyJ0eXAiOi..."; exptoken = ""; for i in token: exptoken += i; exptoken += chr(0); tokenstruct = struct.pack("=i", len(exptoken)) + exptoken; conn = pyodbc.connect(connstr, attrs_before = { 1256:bytearray(tokenstruct) }); 3.x is only slightly more involved due to annoying char/bytes split: token = b"eyJ0eXAiOi..."; exptoken = b""; for i in token: exptoken += bytes({i}); exptoken += bytes(1); tokenstruct = struct.pack("=i", len(exptoken)) + exptoken; conn = pyodbc.connect(connstr, attrs_before = { 1256:tokenstruct }); (SQL_COPT_SS_ACCESS_TOKEN is 1256; it's specific to msodbcsql driver so pyodbc does not have it defined, and likely will not.) Hope this helps. A: You can get a token via from azure.identity import DeviceCodeCredential # Recommended to allocate a new ClientID in your tenant. AZURE_CLI_CLIENT_ID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" credential = DeviceCodeCredential(client_id=AZURE_CLI_CLIENT_ID) databaseToken = credential.get_token('https://database.windows.net/.default') Then use databaseToken.token as an AAD Access Token as described in Leon Yue's answer.
unknown
d5254
train
You can disable cors like that : fetch('https://www.coinbase.com/oauth/authorize?response_type=code&client_id=cc460ce71913c49e4face4ac0e072c38564fabea867ebcd7ab9905970d8f3021&redirect_uri=http://localhost:3000/callback&state=SECURE_RANDOM&scope=wallet:accounts:read', { mode: 'no-cors', method:'GET' }).then(res => res.json()) .then( (result) => { console.log(result) }, (error) => { console.log(error) } )
unknown
d5255
train
[SOLUTION]
library(splines)
library(ggplot2)
library(nlme)
library(gridExtra)

datanew1$DummyVariable = as.factor(datanew1$DummyVariable)
datanew1$Variable2 = as.factor(datanew1$Variable2)
datanew1$Variable3 = as.factor(datanew1$Variable3)

model <- lme(Response~(bs(Variable1, df=3)) + DummyVariable, random=~1|Variable2/Variable3, datanew1, method="REML")
completemodel <- update(model, weights = varIdent(form=~1|DummyVariable))

df_model <- broom.mixed::augment(completemodel)
#> Registered S3 method overwritten by 'broom.mixed':
#>   method      from
#>   tidy.gamlss broom
df_model[".stdresid"] <- resid(completemodel, type = "pearson")

p1 <- ggplot(df_model, aes(.fitted, .resid)) + geom_point() + geom_hline(yintercept = 0) + geom_smooth(se=FALSE)
p2 <- ggplot(df_model, aes(sample = .stdresid)) + geom_qq() + geom_qq_line()
grid.arrange(p1,p2)
#> `geom_smooth()` using method = 'gam' and formula 'y ~ s(x, bs = "cs")'
unknown
d5256
train
You can iterate over keys in a dictionary. myDict = {'one': 1, "two": 2} for key in myDict: print(key) You could check for the value associated with a key, and add the key to a list if it meets a certain test. A: You can try r_numbers = {y:x for x,y in numbers.items()} r_numbers Out[1]: {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six'} [r_numbers[x] for x in sorted(r_numbers.keys())[:5]] Out[2] : ['one', 'two', 'three', 'four', 'five']
unknown
d5257
train
Just define a separate server for port 8443, and do a redirect from there. You'd obviously still have to have a proper certificate for your 8443 server, too. server { listen 8443 ssl; server_name example.com; ssl_...; return 301 https://example.com$request_uri; }
unknown
d5258
train
Try after_save. A: The autoincrement ID does not exist for an ActiveRecord object until it has been saved. It's possible to get the next autoincrement ID for a table, but this doesn't guarantee that the ID will be given to your object when saved since another record may have been added in the meantime.
unknown
d5259
train
This issue has been resolved: I defined the MySql port as 8080 by mistake. I corrected the port to 3306.
unknown
d5260
train
Yes; use the 9-argument form of drawImage() to draw the slice of the canvas (i.e., the source) onto itself (i.e., the destination) as follows: function rescale(slice_start) { canvas.getContext('2d').drawImage(canvas, slice_start, 0, slice_start + 200, 2000, 0, 0, 800, 2000) }
unknown
d5261
train
Here is the demo of how you should use angular.module('myapp', []).controller('ctrl', function($scope, $window){ $scope.data = 0; $scope.changeData = function(){ $scope.data = Math.random(); } $scope.$watch('data', function(newValue, oldValue){ console.log(newValue); }, true); }); Hope this may help you
unknown
d5262
train
Is this what you are looking for? model.three <- lm(log(y2) ~ log(X)) plot(X,predict(model.three)) ## Instead of abline(), use this: lines(model.three$fitted.values) A: Your data expresses an exponential relationship between Y and X, which is Y = exp(X) + eps where eps is some noise. Therefore, I would suggest fitting a model between log(Y) and X, to capture the linear relationship between the two: model.three <- lm(log(y2) ~ X) summary(model.three) The summary confirms that the relationship captured is as expected (i.e. the coefficient for X is very close to 1). Since plotting the data on a linear scale will not be useful, I think it is a good idea to plot the fitted straight line with abline. Note: to be exact, it would be more accurate to capture the relationship between y2 and exp(X), but with your data, the fit is essentially perfect.
unknown
d5263
train
You can add the attribute android:textColor="@drawable/color_selector" to your <ToggleButton>.
//color_selector.xml
<selector xmlns:android="http://schemas.android.com/apk/res/android">
    <item android:state_checked="true" android:color="@color/text_on" />
    <item android:state_checked="false" android:color="@color/text_off" />
</selector>

A: Make another selector at the location ./res/color/my_selector.xml and set it on the ToggleButton with android:textColor="@color/my_selector". Example my_selector.xml:
<?xml version="1.0" encoding="utf-8"?>
<selector xmlns:android="http://schemas.android.com/apk/res/android">
    <item android:color="@color/text_active" android:state_checked="true"/>
    <item android:color="@color/text_default" android:state_checked="false"/>
</selector>

A: In your code, use an OnClickListener.
protected void onCreate(Bundle savedInstanceState) {
    final ToggleButton tb = new ToggleButton(DataHandler.getContext());
    tb.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View view) {
            if (tb.isChecked()) {
                tb.setTextColor(Color.GREEN);
            } else {
                tb.setTextColor(Color.RED);
            }
        }
    });
}
unknown
d5264
train
You can use react-native-fs for that. CachesDirectoryPath (String) The absolute path to the caches directory ExternalCachesDirectoryPath (String) The absolute path to the external caches directory (android only)
unknown
d5265
train
There is no need to use ng-attr-tabindex, it can simply be done with interpolation: <div class="flashcard-front"> <textarea ng-model="card.front" class="form-control flashcard-content" tabindex="{{card.flipped ? -1 : 0}}"></textarea> </div> <div class="flashcard-back"> <textarea ng-model="card.back" class="form-control flashcard-content" tabindex="{{!card.flipped ? -1 : 0}}"></textarea> </div> The problem with the code in the question is that the interpolation needs double curly brackets ({{ }}). The ng-attr-* syntax is only necessary in exotic situations. For more information, see * *AngularJS Developer Guide - Interpolation *AngularJS Developer Guide - ngAttr for binding to arbitrary attributes A: Credit to @Phix for the suggestion to use ng-attr. The relevant part is ng-attr-tabindex="{{card.flipped ? -1 : 0}}" and the same but with !card.flipped instead of card.flipped. My full code is: <div class="flashcard-front"> <textarea ng-model="card.front" class="form-control flashcard-content" ng-attr-tabindex="{{card.flipped ? -1 : 0}}"></textarea> </div> <div class="flashcard-back"> <textarea ng-model="card.back" class="form-control flashcard-content" ng-attr-tabindex="{{!card.flipped ? -1 : 0}}"></textarea> </div> Angular Docs
unknown
d5266
train
I know this was asked a long time ago but here's how to highlight the listbox items using a search entry box in Tkinter. Explanation: * *all_listbox_items is the most important part. When the program first populates the listbox, it's important to capture those initial values and use the search entry box as a way to query these initial entries. *add a textvariable to entry_1 and name it search_var. *put a trace on search_var so if the values in the entry_1 change, we can trigger a function named highlight_searched. *The function highlight_searched basically enumerates all the listbox items and checks to see if the searched text in entry_1 is in any of the items from the listbox. Where this is true, you use selection_set(i) (where i is short-hand for index) to select each item. I'm not particularly happy with using enumerate. It's just easy. There surely is a better way to return the index of an item in a for loop. I'd love to hear a better solution to my enumeration suggestion. Code: from tkinter import * #definitions root = Tk() var_1 = StringVar(root) label_1 = Label(root, text="Search:") label_2 = Label(root, text="Subject specific programs below") label_3 = Label(root, text="Subject:") def highlight_searched(*args): search = search_var.get() for i,item in enumerate(all_listbox_items): if search.lower() in item.lower(): listbox_2.selection_set(i) else: listbox_2.selection_clear(i) if search == '': listbox_2.selection_clear(0, END) search_var = StringVar() search_var.trace('w', highlight_searched) entry_1 = Entry(root, textvariable=search_var) #this should search through the strings listed under listbox_2 configs button_1 = Button(root, text="Install") scrollbar_1 = Scrollbar(root) listbox_2 = Listbox(root, yscrollcommand=scrollbar_1.set) optionmenu_1 = OptionMenu(root, var_1, "Computing", "Engineering", "Physics") string_1 = StringVar(root, name="Google Chrome") string_2 = StringVar(root, name="Thunderbird") string_3 = StringVar(root, name="Adobe Reader X") string_4 = StringVar(root, name="WinRAR") string_5 = StringVar(root, name="OpenOffice") string_6 = StringVar(root, name="Program 1") string_7 = StringVar(root, name="Program 2") string_8 = StringVar(root, name="Program 3") string_9 = StringVar(root, name="Program 4") string_10 = StringVar(root, name="Program 5") string_11 = StringVar(root, name="Program 6") #configuration root.title("Network Installation") listbox_2.insert(1, string_1) listbox_2.insert(2, string_2) listbox_2.insert(3, string_3) listbox_2.insert(4, string_4) listbox_2.insert(5, string_5) listbox_2.insert(6, string_6) listbox_2.insert(7, string_7) listbox_2.insert(8, string_8) listbox_2.insert(9, string_9) listbox_2.insert(10, string_10) listbox_2.insert(11, string_11) optionmenu_1.config(width=15) scrollbar_1.config(command=listbox_2.yview) #grid additions label_1.grid(row=0, column=5) label_2.grid(columnspan=6, row=1, column=0, sticky=E) entry_1.grid(row=0, column=6) button_1.grid(columnspan=2, row=7, column=5) listbox_2.grid(rowspan=6, columnspan=6, row=2, column=0) scrollbar_1.grid(rowspan=6, row=2, column=4, sticky=N+S) optionmenu_1.grid(columnspan=3, row=0, column=1) label_3.grid(row=0, column=0) all_listbox_items = listbox_2.get(0, END) root.mainloop() And anyone showing up to this question that might be looking for a search box that filters the listbox (as opposed to highlighting them like this question is asking) to show only the relevant items pertaining to the search, you can use this: import tkinter as tk root = tk.Tk() def update_listbox(*args): search_term 
= search_var.get() listbox.delete(0, tk.END) for item in all_items: if search_term.lower() in item.lower(): listbox.insert(tk.END, item) search_var = tk.StringVar() search_var.trace('w', update_listbox) searchbox = tk.Entry(root, textvariable=search_var) searchbox.pack(fill=tk.X, expand=False) listbox = tk.Listbox(root) for i in ['Adam', 'Lucy', 'Barry', 'Bob']: listbox.insert(tk.END, i) listbox.pack() all_items = listbox.get(0, tk.END) root.mainloop()
unknown
d5267
train
Does the layout work on other views? Try to create a new view, check "Use a layout page", and then select your layout view file. Hope this works; it has always worked for me.
unknown
d5268
train
You do not set parameters to your SQL query. After state = con.prepareStatement(sql); you need to set actual parameters using state.setXXX(index, value); state = con.prepareStatement(sql); state.setInt(1, id); state.setString(2, name); state.setInt(3, capacity); state.executeUpdate(); And as mentioned in comments you need to at least add logging to your catch blocks. And connection and preparedStatement objects should be closed when are not needed anymore. EDIT In your connect method you close connection object in finally block and return closed connection. And then you try to use closed connection in your newItem() method.
unknown
d5269
train
The object is probably being serialized using the Java Object Serialization Protocol. You can verify this by looking for the magic number 0xACED at the beginning. If this is the case, it's just wrapped with some meta information about the class and length, and you can easily parse the actual byte values off the end. In particular, you would see 0xAC 0xED 0x00 0x05 for the header followed by a classDesc element that would be 0x75 ...bytes... 0x70, followed by a 4 byte length, and the then the bytes themselves. Java serializes the length and other multibyte values in big-endian format.
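As a rough Python sketch of working with such a stream (the offset of the length field depends on the classDesc the stream contains, so it is left as a hypothetical parameter here):
import struct

def looks_like_java_serialization(data):
    # Streams produced by Java Object Serialization start with
    # the magic number 0xACED followed by the version 0x0005.
    return data[:4] == b"\xac\xed\x00\x05"

def read_array_length(data, offset):
    # The length that precedes the raw byte values is a 4-byte
    # big-endian unsigned integer.
    return struct.unpack_from(">I", data, offset)[0]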
unknown
d5270
train
You have to use the currentTextChanged signal that notifies you if the QComboBox selection has been changed sending you the new text, then you should only compare it with the text and together with the setVisible() method fulfill your requirement. self.comboBox.currentTextChanged.connect(self.handle_current_text_changed) def handle_current_text_changed(self, text): self.checkBox.setVisible(text == "Yes") A: Use signals and slots to do this. Capture the Combobox's editTextChanged signal with a slot hideCheckBox. comboBox.currentTextChanged.connect(func) In the function func simply setVisibility to false when the text is "NO" and true when text is "YES". A: If you want to hide the checkbox when the comboBox state is HIDE, for example, and unhide checkBox when the combobox state is UNHIDE, use an IF construction to catch the state of the combobox. Depending on the state, apply one or the other value to the checkbox: class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(762, 590) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.checkBox = QtWidgets.QCheckBox('box', self.centralwidget) self.checkBox.setGeometry(QtCore.QRect(150, 75, 181, 20)) self.checkBox.setObjectName("checkBox") self.comboBox = QtWidgets.QComboBox(self.centralwidget) self.comboBox.setGeometry(QtCore.QRect(150,160,100,20)) self.comboBox.addItem("UNHIDE") self.comboBox.addItem("HIDE") self.comboBox.setObjectName("comboBox") MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.comboBox.currentTextChanged.connect(self.hidefunction) # code for connect to function below def hidefunction(self): text = str(self.comboBox.currentText()) # this is the state of the current text in combobox. Below simple IF block. if text == "UNHIDE": self.checkBox.setHidden(False) # its HIDE - your checkBox when comboBox changed his state else: self.checkBox.setHidden(True) # its HIDE your checkBox when comboBox changed his state def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "11")) MainWindow.show() if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())
unknown
d5271
train
You can set up the callback URL's host with OmniAuth.config.full_host, like:
OmniAuth.config.full_host = "http://yourapp.dev"
This must be placed before OmniAuth is called. I think config/initializers/omniauth.rb is a good place.
unknown
d5272
train
Yes you will have to use a Service to send data in the background as well as foreground. You can use android Service or Background Service, based on your requirements I would suggest using the Background Service. Here are the links for both: Service: http://developer.android.com/guide/components/services.html Background Service: https://developer.android.com/training/run-background-service/create-service.html A: if you want the location irrespective of the fact that the application is open or not then you need to have a service which will keep on posting your location to the server. Here's how you can do it ... Read this answer Note : Activities can't do what you want to achieve as they exist till the user exits them (by pressing the back button, killing them or clearing the stack[Need not think of it for now if a newbie]). A: There is a good article on android.com website https://developer.android.com/training/location/receive-location-updates.html
unknown
d5273
train
Is this what you are looking for? Since you have the id of each section in the href you can pull that to load the appropriate one: JS var currentTab = $(this).find("a").attr("href"); $(currentTab).show().siblings("section").hide(); Change CSS (unless you want the elements to take up space on the page its better to set to display: none): #tabs-content section.show { display: block } #tabs-content section.hide { display: none; } JSFIDDLE
unknown
d5274
train
numpy 1.17 just introduced [quoting] "..three strategies implemented that can be used to produce repeatable pseudo-random numbers across multiple processes (local or distributed).." the 1st strategy is using a SeedSequence object. There are many parent / child options there, but for our case, if you want the same generated random numbers, but different at each run: (python3, printing 3 random numbers from 4 processes) from numpy.random import SeedSequence, default_rng from multiprocessing import Pool def rng_mp(rng): return [ rng.random() for i in range(3) ] seed_sequence = SeedSequence() n_proc = 4 pool = Pool(processes=n_proc) pool.map(rng_mp, [ default_rng(seed_sequence) for i in range(n_proc) ]) # 2 different runs [[0.2825724770857644, 0.6465318335272593, 0.4620869345284885], [0.2825724770857644, 0.6465318335272593, 0.4620869345284885], [0.2825724770857644, 0.6465318335272593, 0.4620869345284885], [0.2825724770857644, 0.6465318335272593, 0.4620869345284885]] [[0.04503760429109904, 0.2137916986051025, 0.8947678672387492], [0.04503760429109904, 0.2137916986051025, 0.8947678672387492], [0.04503760429109904, 0.2137916986051025, 0.8947678672387492], [0.04503760429109904, 0.2137916986051025, 0.8947678672387492]] If you want the same result for reproducing purposes, you can simply reseed numpy with the same seed (17): import numpy as np from multiprocessing import Pool def rng_mp(seed): np.random.seed(seed) return [ np.random.rand() for i in range(3) ] n_proc = 4 pool = Pool(processes=n_proc) pool.map(rng_mp, [17] * n_proc) # same results each run: [[0.2946650026871097, 0.5305867556052941, 0.19152078694749486], [0.2946650026871097, 0.5305867556052941, 0.19152078694749486], [0.2946650026871097, 0.5305867556052941, 0.19152078694749486], [0.2946650026871097, 0.5305867556052941, 0.19152078694749486]] A: If no seed is provided explicitly, numpy.random will seed itself using an OS-dependent source of randomness. Usually it will use /dev/urandom on Unix-based systems (or some Windows equivalent), but if this is not available for some reason then it will seed itself from the wall clock. Since self-seeding occurs at the time when a new subprocess forks, it is possible for multiple subprocesses to inherit the same seed if they forked at the same time, leading to identical random variates being produced by different subprocesses. Often this correlates with the number of concurrent threads you are running. 
For example: import numpy as np import random from multiprocessing import Pool def Foo_np(seed=None): # np.random.seed(seed) return np.random.uniform(0, 1, 5) pool = Pool(processes=8) print np.array(pool.map(Foo_np, xrange(20))) # [[ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.11283279 0.28180632 0.28365286 0.51190168 0.62864241] # [ 0.11283279 0.28180632 0.28365286 0.51190168 0.62864241] # [ 0.28917586 0.40997875 0.06308188 0.71512199 0.47386047] # [ 0.11283279 0.28180632 0.28365286 0.51190168 0.62864241] # [ 0.64672339 0.99851749 0.8873984 0.42734339 0.67158796] # [ 0.11283279 0.28180632 0.28365286 0.51190168 0.62864241] # [ 0.14463001 0.80273208 0.5559258 0.55629762 0.78814652] <- # [ 0.11283279 0.28180632 0.28365286 0.51190168 0.62864241]] You can see that groups of up to 8 threads simultaneously forked with the same seed, giving me identical random sequences (I've marked the first group with arrows). Calling np.random.seed() within a subprocess forces the thread-local RNG instance to seed itself again from /dev/urandom or the wall clock, which will (probably) prevent you from seeing identical output from multiple subprocesses. Best practice is to explicitly pass a different seed (or numpy.random.RandomState instance) to each subprocess, e.g.: def Foo_np(seed=None): local_state = np.random.RandomState(seed) print local_state.uniform(0, 1, 5) pool.map(Foo_np, range(20)) I'm not entirely sure what underlies the differences between random and numpy.random in this respect (perhaps it has slightly different rules for selecting a source of randomness to self-seed with compared to numpy.random?). I would still recommend explicitly passing a seed or a random.Random instance to each subprocess to be on the safe side. You could also use the .jumpahead() method of random.Random which is designed for shuffling the states of Random instances in multithreaded programs. A: Here is a nice blog post that will explains the way numpy.random works. If you use np.random.rand() it will takes the seed created when you imported the np.random module. So you need to create a new seed at each thread manually (cf examples in the blog post for example). The python random module does not have this issue and automatically generates different seed for each thread.
unknown
d5275
train
You can sort it this way:
select DISTINCT gameYear from game order by case when gameYear = 2007 then 0 else gameYear end;

A: You probably fill the dropdown by looping over your SQL result set and outputting HTML <option> elements. Inside this loop you need to check whether the current loop value is equal to the year of your game. If it is, add the selected HTML attribute.

A: SelectedYear is your currently selected year:
SELECT '$SelectedYear' as gameYear UNION SELECT * FROM (SELECT gameYear from game WHERE gameYear <> $SelectedYear ORDER BY gameYear) AS years
unknown
d5276
train
std:.for_each does not update the elements in the range the way you expect. std::for_each applies the lambda to each element, but does not care about the return value from the lambda. You want std::transform for that: std::transform(param.begin(), param.end(), param.begin(), f1); // ^^^^^^^^^^^^^ // | // it starts writing the result here To work with std::for_each you would need to receive the argument by-reference in the lambda and make the update to the argument directly instead of returning it: auto f1 = [](int& n) { n += 5; }; // int& and add to n directly std::for_each(param.begin(), param.end(), f1);
unknown
d5277
train
There may be something wrong with the user variable. Can you check this:
const user = {'name': req.body.name, 'password': req.body.password}
Update: I tried out:
var data = [];
const user = {'name': "Deshan", 'password': "password"}
data.push(user);
console.log(data);
And the result was as follows:
[ { name: 'Deshan', password: 'password' } ]
So it may be a problem with the request data.
unknown
d5278
train
If I understood correctly, you want to split a big file in smaller files with maximum of 10k lines. I see 2 problems on your code: * *You never change the FullFilePath variable. So you will always rewrite on the same file *You always read and write the whole source file to the target file. I rewrote your code to fit the behavior I said earlier. You just have to modify the strings. int maxRecordsPerFile = 10000; int currentFile = 1; using (StreamReader sr = new StreamReader("source.txt")) { int currentLineCount = 0; List<string> content = new List<string>(); while (!sr.EndOfStream) { content.Add(sr.ReadLine()); if (++currentLineCount == maxRecordsPerFile || sr.EndOfStream) { using (StreamWriter sw = new StreamWriter(string.Format("file{0}.txt", currentFile))) { foreach (var line in content) sw.WriteLine(line); } content = new List<string>(); currentFile++; currentLineCount = 0; } } } Of course you can do better than that, as you don't need to create that string and do that foreach loop. I just made this quick example to give you the idea. To improve the performance is up to you
unknown
d5279
train
Surely not the exact case you are looking for but you can check out Solr with Mahout. Mahout provides support for LDA for topic modeling, which will help you to group topics from your dataset A topic model is, roughly, a hierarchical Bayesian model that associates with each document a probability distribution over "topics", which are in turn distributions over words. For instance, a topic in a collection of newswire might include words about "sports", such as "baseball", "home run", "player", and a document about steroid use in baseball might include "sports", "drugs", and "politics". Note that the labels "sports", "drugs", and "politics", are post-hoc labels assigned by a human, and that the algorithm itself only assigns associate words with probabilities. The task of parameter estimation in these models is to learn both what the topics are, and which documents employ them in what proportions. So if within a dataset if you have documents for Mobiles, you would get a group of terms with blackberry, iphone, mobile and so on. These may not be similar terms but would relate to the same topic.
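For a quick feel of what LDA produces, here is a rough sketch using the gensim library in Python (a different tool than Mahout; the toy documents and topic count are made up):
from gensim import corpora, models

# Toy tokenized documents; in practice these come from your own dataset
docs = [["baseball", "home", "run", "player", "sports"],
        ["iphone", "blackberry", "mobile", "battery"],
        ["baseball", "steroid", "drugs", "politics"]]

dictionary = corpora.Dictionary(docs)
corpus = [dictionary.doc2bow(d) for d in docs]

# Learn 2 topics; each topic is a probability distribution over words
lda = models.LdaModel(corpus, num_topics=2, id2word=dictionary, passes=10)
for topic in lda.print_topics():
    print(topic)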
unknown
d5280
train
Try this:
#main-content {
    float: left; /* float element to the left side */
    width: 80%;
    padding-left: 113px;
    padding-top: 20px;
}

#sidebar {
    border-top: 1px solid #99CC33;
    border-left: 1px solid #99CC33;
    height: 300px;
    width: 200px;
    margin-right: 5px;
    padding: 5px 0 0 5px;
    position: absolute;
    right: 0; /* position element to the right */
}

EDIT: Sorry. You wanted the sidebar on the right.
unknown
d5281
train
I think that overriding get_form_kwargs is ok. If all the kwargs are instance attributes, then I would update the instance in the get_form_kwargs method. Then you shouldn't have to override the form's __init__, or update the instance's attributes in the form_valid method. def get_form_kwargs(self, **kwargs): kwargs = super(CreateAdvertisment, self).get_form_kwargs() if kwargs['instance'] is None: kwargs['instance'] = Advertisement() kwargs['instance'].advertiser = self.advertiser ... return kwargs In the model's clean method, you can now access self.advertiser. A: alasdairs proposal works fine I have the following now : def get_form_kwargs(self, **kwargs): kwargs = super(CreateAdvertisment, self).get_form_kwargs() if kwargs['instance'] is None: kwargs['instance'] = Advertisement() kwargs['instance'].advertiser = self.advertiser kwargs['instance'].share_type = self.share_type kwargs['instance'].country = self.country kwargs['instance'].postal_code = self.postal_code kwargs['instance'].position = self.position return kwargs def form_valid(self, form): ret = super(CreateAdvertisment, self).form_valid(form) return ret Of course there is no need to override form_valid anymore. I have just included here in order to display that we do not set the instance fields anymore as this is done in get_form_kwargs() already
unknown
d5282
train
Instead of
document.getElementById('password').style.display = 0;
try
document.getElementById('password').style.display = 'none';

A: You can't really remove divs as far as I know, but you can set the display of a div to "none" (not "0" as you tried it):
style="display:none"
This makes the div invisible and there won't be any white space.
unknown
d5283
train
First you have to remove parentheses of the function as parameter, in this line sliders.push(new slider('paletteControl','pSlider',0,255,100,sliderChange())); It becomes sliders.push(new slider('paletteControl','pSlider',0,255,100,sliderChange)); Then you get the change function like this (without parentheses) $(id).slider({ range: "min", min: sliders[i].min, max: sliders[i].max, value: sliders[i].defaultvalue, create: function() { handle.text( $( this ).slider( "value" ) ); }, change: sliders[i].changeFunc }); OR $(id).slider({ range: "min", min: sliders[i].min, max: sliders[i].max, value: sliders[i].defaultvalue, create: function() { handle.text( $( this ).slider( "value" ) ); } }); $( id ).on( "slidechange", function( event, ui ) {} ); A: You may call the changeFunc using window[value.changeFunc] as shown below function newSlider(context, id, min, max, defaultvalue, changeFunc) { this.context = context; this.id = id; this.min = min; this.max = max; this.defaultvalue = defaultvalue; this.changeFunc = changeFunc; } function func1() { $('#output').append("func1<br>"); } function func2() { $('#output').append("func2<br>"); } function func3() { $('#output').append("func3<br>"); } var sliders = []; sliders.push(new newSlider('paletteControl', 'pSlider1', 0, 255, 100, "func1")); sliders.push(new newSlider('paletteControl', 'pSlider2', 0, 255, 200, "func2")); sliders.push(new newSlider('paletteControl', 'pSlider3', 0, 255, 70, "func3")); $.each(sliders, function(index, value) { $("#" + value.id).slider({ range: "min", min: value.min, max: value.max, value: value.defaultvalue, change: window[value.changeFunc] }); }); Working code: https://plnkr.co/edit/nwjOrnwoI7NU3MY8jXYl?p=preview
unknown
d5284
train
This is probably an issue with the whois and how it maps the IP to a location. Take a look at this file, it contains the IP address ranges for the Azure datacenters. Here's what you'll see for West Europe: <subregion name="West Europe"> .. <network>168.63.0.0/19</network> <network>168.63.96.0/19</network> .. </subregion> Now, since this is IP range is in CIDR notation, there are a few tools which make it easy to find the complete range, like this one. So actually, 168.63.96.0/19 = 168.63.96.0 - 168.63.127.255. And this range includes 168.63.108.xx. So there's no issue with your deployment and you can be sure it's located in West Europe.
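A rough way to check such a range yourself is Python's ipaddress module (the exact address below is just a stand-in for the 168.63.108.xx one):
import ipaddress

net = ipaddress.ip_network("168.63.96.0/19")
print(net[0], "-", net[-1])   # 168.63.96.0 - 168.63.127.255

# Membership test for an address in that range
print(ipaddress.ip_address("168.63.108.1") in net)   # True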
unknown
d5285
train
You can allways pass in a Delegate and call DynamicInvoke on it: MyClass MyMethod(Delegate x) { // ... x.DynamicInvoke(....); // ... } A: You can just use delegate if you want, although it's a bit old school :) public void TestInvokeDelegate() { InvokeDelegate( new TestDelegate(ShowMessage), "hello" ); } public void InvokeDelegate(TestDelegate del, string message) { del(message); } public delegate void TestDelegate(string message); public void ShowMessage(string message) { Debug.WriteLine(message); } A: It looks like you are trying to implement the Visitor pattern. In this case visiting methods usually have only one parameter - the instance to visit. Having additional arguments passed around conceals the use of the pattern and makes it harder to reason about. This article shows you one way to implement it in C#. The key is to create a visitor class that will encapsulate all the parameters that affect the visiting process. This way you don't need to pass anythnig other than an object in question in the visiting method - everything else lives in instance fields. However, if you really want to pass some additional parameters in the method and don't know what type they can have, there are ways to do that. More or less standard approach in .NET world is to use a delegate without return value and with single parameter of type object, the example would be ParameterizedThreadStart delegate: public delegate void ParameterizedThreadStart( Object obj ) This way you get to pass only one parameter in the delegate, but it could be anything - an instance of a class, an array or null, if you end up not needing additional arguments after all. The downside of this approach is that it requires type casting which can lead to runtime errors.
unknown
d5286
train
I faced same problem, finally I solve it using this code to pass integer value to Bigdecimal payment.setSubtotal(new BigDecimal("10")); instead of using: payment.setSubtotal(new BigDecimal(10)); e.g.: public void onClick(View v) { PayPalPayment payment = new PayPalPayment(); payment.setSubtotal(new BigDecimal("10")); payment.setCurrencyType("USD"); payment.setRecipient("[email protected]"); // payment.setPaymentType(PayPal.PAYMENT_TYPE_GOODS); Intent checkoutIntent = PayPal.getInstance().checkout(payment, this); startActivityForResult(checkoutIntent, 1); }
unknown
d5287
train
The assign() function has a twin called get(). This is the function that you need. Refer to this concise and easy-to-understand article here.
unknown
d5288
train
The comment in the question about the requirements inspired me to implement this in terms of k * step instead of some other mechanism controlling the number of iterations over the container. template <class Container> void go(const Container& C) { const size_t sz = C.size(); if(idx >= sz) return; size_t k_max = (sz - idx) / step + 1; size_t k = 0 for(auto it = std::advance(C.begin(), idx); k < k_max && (std::advance(it, step), true); ++k) { /* do something with *it */ } } A: You might use helper functions: template <typename IT> IT secure_next(IT it, std::size_t step, IT end, std::input_iterator_tag) { while (it != end && step--) { ++it; } return it; } template <typename IT> IT secure_next(IT it, std::size_t step, IT end, std::random_access_iterator_tag) { return end - it < step ? end : it + step; } template <typename IT> IT secure_next(IT it, std::size_t step, IT end) { return secure_next(it, step, end, typename std::iterator_traits<IT>::iterator_category{}); } And then: for (auto it = secure_next(C.begin(), idx, C.end()); it != C.end(); it = secure_next(it, step, C.end()) { /* do something with *it */ } Alternatively, with range-v3, you could do something like: for (const auto& e : C | ranges::view::drop(idx) | ranges::view::stride(step)) { /* do something with e */ } A: One option is to adapt the iterator so that it is safe to advance past the end. Then you can use stock std::next(), std::advance(), pass it to functions expecting an iterator, and so on. Then the strided iteration can look almost exactly like you want: template<class Container, class Size> void iterate(const Container& c, Size idx, Size step) { if (unlikely(idx < 0 || step <= 0)) return; bounded_iterator it{begin(c), c}; for (std::advance(it, idx); it != end(c); std::advance(it, step)) test(*it); } This is not dissimilar from the secure_next() suggestion. It is a little more flexible, but also more work. The range-v3 solution looks even nicer but may or may not be an option for you. Boost.Iterator has facilities for adapting iterators like this, and it's also straightforward to do it directly. This is how an incomplete sketch might look for iterators not supporting random access: template<class Iterator, class Sentinel, class Size> class bounded_iterator { public: using difference_type = typename std::iterator_traits<Iterator>::difference_type; using value_type = typename std::iterator_traits<Iterator>::value_type; using pointer = typename std::iterator_traits<Iterator>::pointer; using reference = typename std::iterator_traits<Iterator>::reference; using iterator_category = typename std::iterator_traits<Iterator>::iterator_category; template<class Container> constexpr explicit bounded_iterator(Iterator begin, const Container& c) : begin_{begin}, end_{end(c)} { } constexpr auto& operator++() { if (begin_ != end_) ++begin_; return *this; } constexpr reference operator*() const { return *begin_; } friend constexpr bool operator!=(const bounded_iterator& i, Sentinel s) { return i.begin_ != s; } // and the rest... 
private: Iterator begin_; Sentinel end_; }; template<class Iterator, class Container> bounded_iterator(Iterator, const Container&) -> bounded_iterator<Iterator, decltype(end(std::declval<const Container&>())), typename size_type<Container>::type>; And for random access iterators: template<RandomAccessIterator Iterator, class Sentinel, class Size> class bounded_iterator<Iterator, Sentinel, Size> { public: using difference_type = typename std::iterator_traits<Iterator>::difference_type; using value_type = typename std::iterator_traits<Iterator>::value_type; using pointer = typename std::iterator_traits<Iterator>::pointer; using reference = typename std::iterator_traits<Iterator>::reference; using iterator_category = typename std::iterator_traits<Iterator>::iterator_category; template<class Container> constexpr explicit bounded_iterator(Iterator begin, const Container& c) : begin_{begin}, size_{std::size(c)}, index_{0} { } constexpr auto& operator+=(difference_type n) { index_ += n; return *this; } constexpr reference operator*() const { return begin_[index_]; } friend constexpr bool operator!=(const bounded_iterator& i, Sentinel) { return i.index_ < i.size_; } // and the rest... private: const Iterator begin_; const Size size_; Size index_; }; As an aside, it seems GCC produces slightly better code with this form than with my attempts at something like secure_next(). Can its optimizer reason better about indices than pointer arithmetic? This example is shared also via gist and godbolt.
unknown
d5289
train
xargs -P 10 | curl GNU xargs -P can run multiple curl processes in parallel. E.g. to run 10 processes: xargs -P 10 -n 1 curl -O < urls.txt This will speed up download 10x if your maximum download speed if not reached and if the server does not throttle IPs, which is the most common scenario. Just don't set -P too high or your RAM may be overwhelmed. GNU parallel can achieve similar results. The downside of those methods is that they don't use a single connection for all files, which what curl does if you pass multiple URLs to it at once as in: curl -O out1.txt http://exmple.com/1 -O out2.txt http://exmple.com/2 as mentioned at https://serverfault.com/questions/199434/how-do-i-make-curl-use-keepalive-from-the-command-line Maybe combining both methods would give the best results? But I imagine that parallelization is more important than keeping the connection alive. See also: Parallel download using Curl command line utility A: Here is how I do it on a Mac (OSX), but it should work equally well on other systems: What you need is a text file that contains your links for curl like so: http://www.site1.com/subdirectory/file1-[01-15].jpg http://www.site1.com/subdirectory/file2-[01-15].jpg . . http://www.site1.com/subdirectory/file3287-[01-15].jpg In this hypothetical case, the text file has 3287 lines and each line is coding for 15 pictures. Let's say we save these links in a text file called testcurl.txt on the top level (/) of our hard drive. Now we have to go into the terminal and enter the following command in the bash shell: for i in "`cat /testcurl.txt`" ; do curl -O "$i" ; done Make sure you are using back ticks (`) Also make sure the flag (-O) is a capital O and NOT a zero with the -O flag, the original filename will be taken Happy downloading! A: As others have rightly mentioned: -cat urls.txt | xargs -0 curl -O +cat urls.txt | xargs -n1 curl -O However, this paradigm is a very bad idea, especially if all of your URLs come from the same server -- you're not only going to be spawning another curl instance, but will also be establishing a new TCP connection for each request, which is highly inefficient, and even more so with the now ubiquitous https. Please use this instead: -cat urls.txt | xargs -n1 curl -O +cat urls.txt | wget -i/dev/fd/0 Or, even simpler: -cat urls.txt | wget -i/dev/fd/0 +wget -i/dev/fd/0 < urls.txt Simplest yet: -wget -i/dev/fd/0 < urls.txt +wget -iurls.txt A: A very simple solution would be the following: If you have a file 'file.txt' like url="http://www.google.de" url="http://www.yahoo.de" url="http://www.bing.de" Then you can use curl and simply do curl -K file.txt And curl will call all Urls contained in your file.txt! So if you have control over your input-file-format, maybe this is the simplest solution for you! A: Or you could just do this: cat urls.txt | xargs curl -O You only need to use the -I parameter when you want to insert the cat output in the middle of a command. A: This works for me: $ xargs -n 1 curl -O < urls.txt I'm in FreeBSD. Your xargs may work differently. Note that this runs sequential curls, which you may view as unnecessarily heavy. If you'd like to save some of that overhead, the following may work in bash: $ mapfile -t urls < urls.txt $ curl ${urls[@]/#/-O } This saves your URL list to an array, then expands the array with options to curl to cause targets to be downloaded. 
The curl command can take multiple URLs and fetch all of them, recycling the existing connection (HTTP/1.1), but it needs the -O option before each one in order to download and save each target. Note that characters within some URLs ] may need to be escaped to avoid interacting with your shell. Or if you are using a POSIX shell rather than bash: $ curl $(printf ' -O %s' $(cat urls.txt)) This relies on printf's behaviour of repeating the format pattern to exhaust the list of data arguments; not all stand-alone printfs will do this. Note that this non-xargs method also may bump up against system limits for very large lists of URLs. Research ARG_MAX and MAX_ARG_STRLEN if this is a concern.
unknown
d5290
train
I found the solution myself. It seems I had hit the limit of 10 Google Cloud projects (the standard limit, I think), and since Firebase actually uses Google Cloud as well, I had to delete some old Google Cloud projects I did not use anymore. After that my old Firebase projects showed up in the new Firebase console and I could import them into the new console.
unknown
d5291
train
Yes, there are lot of ways that this can be handled. A simple google search could have helped you out. The most simple way is to set OnClickListener() for the Login Button. The listener method will be called when the button is clicked. Inside the method, you can check if the Edittext field is empty using the TextUtils.isEmpty("TEXT INSIDE EDITEXT") method. The text inside the edit text can be read using the getText() method. Please have a look at the below code for reference. public OnClickListener onLoginClick = new OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { EditText emailField = (EditText)layout.findViewById(R.id. emailEt); String email = emailField.getText().toString() if(TextUtils.isEmpty(email)) { // Show error here } else { // Do the necessary action here. } } }; Similarly, add a check for the password field. In the XML file, set the listener to the Login button like this. android:onClick="onLoginClick" A: yourEditext.addTextChangedListener(new TextWatcher() { @Override public void onTextChanged(CharSequence s, int start, int before, int count) { //Here you can store email into variable before clicking button // TODO Auto-generated method stub } @Override public void beforeTextChanged(CharSequence s, int start, int count, int after) { // TODO Auto-generated method stub } @Override public void afterTextChanged(Editable s) { // TODO Auto-generated method stub } }); A: Yes you can user myEditText.addTextChangedListener() when user is Typing you will get the the email and password emailEditText.addTextChangedListener(new TextWatcher() { @Override public void onTextChanged(CharSequence s, int start, int before, int count) { //Here you can store email into variable before clicking button // TODO Auto-generated method stub } @Override public void beforeTextChanged(CharSequence s, int start, int count, int after) { // TODO Auto-generated method stub } @Override public void afterTextChanged(Editable s) { // TODO Auto-generated method stub } });
unknown
d5292
train
Change single quote to double quotes. Note that variables inside the single quotes would not be parsed. header("Location: $v1"); A: Wrong syntax. Try: $url = "http://www.google.com/"; header("Location: $url"); // ^ ^ // You should use double quotes to expand variables. A: This worked for me: $v1 = "http://www.google.com"; header('Location:'. $v1);
unknown
d5293
train
First of all, buffers are backed by the smalloc module and this module was not added by io.js devs, it was initiated in node 0.11 branch, io.js just imported it. Raw memory allocation means a lower level of memory manipulation and thus - faster operations, better performance, which is what aims both node.js and io.js. So if you need to implement something in the binary world without being limited to current Buffer API you should use smalloc to create your own ways to manipulate the memory. As the docs say: This can be used to create your own Buffer-like classes. No other properties are set, so the user will need to keep track of other necessary information (e.g. length of the allocation). Also, this is not a try to make javascript a strongly typed language, this is just memory manipulations, it can't be done in other way ensuring higher performance. A: Thanks @micnic for answering the question well. I would like to offer some supplemental information about why I implemented smalloc. Don't think raw memory allocations in JS are some strange new thing. It's the same type of mechanism that's used by Typed Arrays under the hood. So, anywhere you can use a Typed Array you could also use smalloc. The advantage of smalloc is that it doesn't define anything for you. Allowing the maximum flexibility of your API. It's also safe, because the GC will clean up your allocations when the object is no longer being used. One usage would be for a math library. Especially if writing a native module. I personally use it for tricky performance optimizations of allocating memory on an object then sharing that memory between JS and C++ to enable shared state between the two. It's by far the fastest way to do so, and has lead to some impressive optimizations in Node and io.js. Remember that you can allocate onto existing objects. Which is where the power lies. For example: function Alloc(n) { n >>>= 0; // uint32 conversion this.length = n; smalloc.alloc(n, this); } var a = new Alloc(16); There's a simple new construct that just allocates a Uint8 external data array onto the instance. I'll quickly reiterate answers to your questions: * *I wonder if there is really a need of raw memory allocation in javscript using smalloc? Yes. Think Typed Arrays. *If its needed then why? Answered above. Also, search for anything that uses Typed Arrays. *what would be the use case for using smalloc? Answered above. In addition, there are many other uses that developers are finding for it. *and if not then why did io.js members add this module? I wrote it long before io.js was around. :) *Are we trying to make javascript a strongly typed language? Absolutely no. The two are not even related. UPDATE: Because of breaking changes coming in V8 v4.4, smalloc has been marked "deprecated" starting in io.js v2.
unknown
d5294
train
Shouldn't you just do this?

export class HeaderMainComponent {
  logoAlt = 'We Craft beautiful websites'; // Logo alt and title texts
  @ViewChild('navTrigger') navTrigger: ElementRef;
  isMenuShown = false;

  constructor(private layoutService: LayoutService, private renderer: Renderer) { }

  menuToggle(event: any) {
    if (this.navTrigger.nativeElement.classList.contains('opened')) {
      this.navTrigger.nativeElement.classList.remove('opened');
    } else {
      this.navTrigger.nativeElement.classList.add('opened');
    }
  }

  onMenuSelect(event: any) {
    this.isMenuShown = false;
    this.menuToggle(event); // Must apply the event binding as well
  }
}
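For completeness, a hypothetical template wiring for that component could look like the snippet below (the element names and menu markup are invented for illustration; only #navTrigger, menuToggle, onMenuSelect and isMenuShown come from the component above):

<header>
  <button #navTrigger type="button" (click)="menuToggle($event)">Menu</button>
  <nav *ngIf="isMenuShown">
    <a (click)="onMenuSelect($event)">Home</a>
  </nav>
</header>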
unknown
d5295
train
I believe you are asking how to load and store a shared list of content that all users of the application can access, so that you don't have to load the contents from a database each time a user loads a page. This can be done easily with Java, as you mentioned, but with PHP you'll have to push shared application data outside of PHP... MemCache is one option. See: How to set a global variable accessible throughout the application
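A minimal sketch of that approach (my own illustration, assuming the PECL Memcached extension and a memcached server on localhost; the key name, TTL and the load_content_from_database() helper are made up):

$cache = new Memcached();
$cache->addServer('127.0.0.1', 11211);

$content = $cache->get('shared_content_list');
if ($content === false) {
    // Cache miss: load from the database once, then share it across requests
    $content = load_content_from_database(); // hypothetical helper
    $cache->set('shared_content_list', $content, 300); // cache for 5 minutes
}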
unknown
d5296
train
The trick is not to have your UI code and server code in two separate files but to write a function which contains both your UI and server code. Try this:

shinyapp <- function(mat) {
  app <- list(
    ui = bootstrapPage(
      # here comes your ui.R
    ),
    server = function(input, output) {
      # here comes your server.R
      # the argument mat can now be used in server
    }
  )
  runApp(app)
}

This worked for me.
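To make that concrete, here is a small runnable sketch of my own (not from the answer above) that mirrors the same list(ui, server) structure and simply renders the matrix passed in; the function and output names are arbitrary:

library(shiny)

run_matrix_app <- function(mat) {
  app <- list(
    ui = bootstrapPage(
      tableOutput("mat_table")
    ),
    server = function(input, output) {
      # 'mat' is captured from the enclosing function's environment
      output$mat_table <- renderTable(mat)
    }
  )
  runApp(app)
}

# run_matrix_app(matrix(1:9, nrow = 3))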
unknown
d5297
train
You need to do an include 'AdminTab.php'; as well, since your class extends that.

A: Not completely sure I understand your question: are you saying that you have a static class "B" which extends class "A", with "A" having the regenerateThumbnailsCron() method that you want to call before anything else? If so, then try this (note that regenerate() must be protected rather than private so the child class can call it):

<?php
class A {
    protected function regenerate() {
        // ... do something ...
    }
}

class B extends A {
    function __construct() {
        if ($_GET["pass"] == "password") {
            parent::regenerate();
        }
    }

    function regenerateThumbnailsCron() {
        // ... do something ...
    }
}

$images = new B();
$images->regenerateThumbnailsCron();
?>

This way, your parent's regenerate() function gets called during the constructor. You can switch this around to be a static class if you want; if your goal is to compartmentalise variables and functions away from the global scope, that would be a better way.
unknown
d5298
train
Qt GUIs can be rendered with many different styles. native="true" forces the widget to use the operating system's native style (on Linux, some Qt apps look out of place because they don't match the rest of the native applications).
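For context, the attribute normally appears in a Qt Designer .ui file; a hypothetical snippet (the widget name and geometry are made up for illustration) might look like:

<widget class="QWidget" name="containerWidget" native="true">
 <property name="geometry">
  <rect>
   <x>0</x>
   <y>0</y>
   <width>400</width>
   <height>300</height>
  </rect>
 </property>
</widget>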
unknown
d5299
train
This is how I did it without collection operators (there doesn't seem to be a shortcut for doing it that way); basically it's just a nested fetch request:

func dailyitems() -> [(date: String, items: [Item])]? {
    let request = NSFetchRequest(entityName: ItemEntity)
    request.returnsDistinctResults = true
    request.resultType = NSFetchRequestResultType.DictionaryResultType
    request.propertiesToFetch = NSArray(object: "date_string") as [AnyObject]

    var array_tuples: [(date: String, items: [Item])]?
    var daily_items: [Item] = []

    do {
        let distinctResults: NSArray? = try context.executeFetchRequest(request)
        if (distinctResults == nil || distinctResults!.count < 1) {
            return nil
        }
        array_tuples = []
        for date in distinctResults! {
            let itemRequest = NSFetchRequest(entityName: ItemEntity)
            itemRequest.predicate = NSPredicate(format: "date_string == %@", date.valueForKey("date_string") as! String)
            do {
                let dayitems = try context.executeFetchRequest(itemRequest) as? [NSManagedObject]
                if (dayitems != nil && dayitems?.count > 0) {
                    daily_items = []
                    for dayItem in dayitems! {
                        daily_items.append(
                            Item(
                                item: dayItem.valueForKey("item") as! String,
                                date: dayItem.valueForKey("date_string") as! String,
                                value: dayItem.valueForKey("value") as! Int
                            )
                        )
                    }
                    array_tuples?.append((date: date.valueForKey("date_string") as! String, items: daily_items))
                }
            } catch {
                return nil
            }
        }
    } catch {
        return nil
    }

    return array_tuples
}

Not sure if there's a more efficient way to do this...
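A quick usage sketch of the method above (illustrative only; it relies solely on the declared return type):

if let days = dailyitems() {
    for day in days {
        print("\(day.date): \(day.items.count) items")
    }
}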
unknown
d5300
train
I recommend using Rome:

// Feed header
SyndFeed feed = new SyndFeedImpl();
feed.setFeedType("rss_2.0");
feed.setTitle("Sample Feed");
feed.setLink("http://example.com/");

// Feed entries
List entries = new ArrayList();
feed.setEntries(entries);

SyndEntry entry = new SyndEntryImpl();
entry.setTitle("Entry #1");
entry.setLink("http://example.com/post/1");

SyndContent description = new SyndContentImpl();
description.setType("text/plain");
description.setValue("There is text in here.");
entry.setDescription(description);
entries.add(entry);

// Write the feed to XML
StringWriter writer = new StringWriter();
new SyndFeedOutput().output(feed, writer);
System.out.println(writer.toString());
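If the project uses Maven, the library can be pulled in with a dependency along these lines (the version number below is a placeholder; check the latest release on Maven Central, and note that very old releases were published under the rome groupId instead of com.rometools):

<dependency>
    <groupId>com.rometools</groupId>
    <artifactId>rome</artifactId>
    <version>1.18.0</version> <!-- placeholder version -->
</dependency>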
unknown