209 skills found · Page 4 of 7
attaoveisi / Coupling ABAQUS MATLAB: We introduce a new framework for running finite element (FE) packages inside an online loop together with MATLAB. In contrast to Hardware-in-the-Loop (HiL) techniques, in the proposed Software-in-the-Loop (SiL) framework the FE package serves as a simulation platform replicating the real system, which may be inaccessible for practical reasons such as cost or restricted availability. Practically, SiL for sophisticated structural design and multi-physical simulations provides a platform for preliminary tests before prototyping and mass production. This can reduce a new product's costs significantly and adds flexibility in evaluating different instrumentations, with the goal of shortlisting the most cost-effective ones before moving to real-time experiments on civil and mechanical systems. The proposed SiL interconnection is not limited to ABAQUS: any host FE package capable of executing user-defined FORTRAN commands can be used. The focal point of this research is the use of a compiled FORTRAN subroutine as a messenger between the ABAQUS/CAE kernel and the MATLAB Engine. To show the generality of the proposed scheme, the limitations of the SiL schemes available in the literature are addressed in this paper. Additionally, all technical details for establishing the connection between the FEM and MATLAB are provided for the interested reader. Finally, two numerical sub-problems are defined for offline and online post-processing, i.e., offline optimization and closed-loop system performance analysis in control theory. Keywords: software-in-the-loop; finite element; optimal placement; structural optimization; vibration control.
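The paper's own messenger is a compiled FORTRAN subroutine inside ABAQUS; purely as an illustration of the software-in-the-loop exchange it describes, the sketch below shows the same request/response pattern using the MATLAB Engine API for Python. The names fe_step and controller_step are hypothetical placeholders (for the FE increment and the MATLAB-side controller), not part of the paper or of ABAQUS.

# Illustrative SiL loop only: the paper couples ABAQUS to MATLAB through a
# compiled FORTRAN subroutine; here the same exchange pattern is sketched
# with the MATLAB Engine API for Python. fe_step and controller_step are
# hypothetical placeholders, not the paper's implementation.
import matlab.engine

def fe_step(u, dt):
    """Placeholder for one FE increment; returns a sensor measurement."""
    return 0.0  # a real SiL setup would obtain this from the FE solver

eng = matlab.engine.start_matlab()   # start a MATLAB Engine session
u, dt = 0.0, 1.0e-3                  # control input and time step
for _ in range(1000):                # closed-loop simulation over 1000 increments
    y = fe_step(u, dt)                                       # FE model output for input u
    u = eng.controller_step(matlab.double([y]), nargout=1)   # MATLAB computes the next input
eng.quit()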
Wallace-Best / Best: Wallace Best's Network is a global comment system that improves discussion on websites and connects conversations across the web.
Cryptoaj-hack / DFDTOKEN: Decentralized Finance (DeFi) Development Services & Solutions. Eliminate the role of the middleman by availing yourself of decentralized finance (DeFi) development services & solutions. Get access to major financial services through a blockchain network and experience the benefits of automation, a higher level of security, anonymity, interoperability, and transparency. Our wide range of services includes: Market-Making Consulting We take immense effort in establishing financial markets that understand customers' proprietary algorithms. We aim at improving investors' access to liquidity and democratizing the whole system. We render customized features according to the customer's expected return on investment. Decentralized Crypto Banking We ensure a frictionless user experience by facilitating the direct transfer of value between the involved parties, supported by decentralization. Our ready-to-launch white-label mobile payment apps render a variety of services such as wallet integration, value holding, and detailed transactional analysis. DeFi Lottery System Development We provide a no-loss lottery system that benefits our participants completely. We take steps to eliminate custodianship of the pooled capital. We permit investing your capital in other related dapps and distribute the rewards, in the form of a major share of the interest earned, to a winner randomly selected by the smart contracts. We assure a regular flow of returns. Derivatives Over DeFi Platform We ensure seamless access to derivatives and maximize your earning potential by many notches. By establishing robust dapps, we enable traders to hedge their portfolio of investments and minimize risks by directly engaging with their peers through a democratic platform. We are experts in derivatives market-making and dapp platform development. Decentralized Fund Management All your crypto assets will be managed to yield high performance on a decentralized exchange through smart control and management. With in-depth experience in investment exchanges along with our strong knowledge of DeFi, we render our services at low fees and avoid potential risks. DeFi Insurance System Development We ensure that there are no risks present in our smart contracts. With our robust provision of insurance services, we assure you that there will be no chance of uncontrollable liquidity requests. We contain future risks, uncertainties, and emergencies through lucrative insurance deals. DeFi Yield Farming Platform Development Yield farming refers to the technique through which one can earn more cryptocurrency by using one's existing crypto holdings. Liquidity providers play a vital role in the success of yield farming. They stake their assets in liquidity pools and facilitate trading in cryptos by creating a market. DeFi Staking Platform Development DeFi staking involves a mechanism where crypto assets are staked on a supported wallet or exchange and passive income is earned. The rewards can be calculated based on the quantity of staked assets, the staking duration, the inflation rate, and the network issuance rate (a simple illustrative calculation is sketched after this listing). DeFi Lending Platform Development DeFi lending platforms have been made popular by the likes of Aave and Compound. The basic features of a DeFi lending platform include flash loan facilities, a fiat payment gateway, and an exclusive margin trading facility. The advantages of DeFi lending include high immutability, better transparency, quick access, and resistance to transaction censorship.
DeFi Smart Contract Development One of the pivotal reasons behind the tremendous growth of DeFi services is the heavy investment made in robust DeFi smart contract development. Smart contracts are created with the Solidity programming language, are highly encrypted, and automate the tasks to be executed based on pre-set terms and conditions. DeFi Dapp Development DeFi dapp development plays a critical role in avoiding the risk of a central point of failure. Dapps are highly secure compared to centralized applications due to the absence of a central authority. DeFi Tokens Development DeFi token development has played a critical role in boosting the growth of decentralized applications. Their value is currently higher than Bitcoin's; they have a huge trading volume and have garnered a lot of attention from the mainstream crowd in recent times. DeFi DEX Development Like Uniswap Uniswap is one of the leading DeFi projects being undertaken. It is an innovative venture as it utilizes incentivized liquidity pools instead of regular order books. Every user of Uniswap is rewarded with a percentage of the fees incurred on every Ethereum transaction for rendering liquidity to the system. DeFi Wallet Development Traders have complete control over their funds through DeFi wallet development, without the interference of any authorities in the system. Supreme security is guaranteed for users without any compromise. By supplying customized private keys to every user, there is no chance of any loss of data. DeFi Marketing Services To help DeFi projects gain user engagement, marketing services are indispensable. From drafting white papers, video and content marketing, to legal advisory, marketing, and community management, our DeFi marketing and consulting services are well-versed to get the job done. DeFi Synthetic Asset Development Synthetic assets derive their value from underlying assets and derivatives, which are essentially smart contracts. In DeFi, synthetic assets have gained acclaim as they involve low risks and little chance of price fluctuations. Users can easily invest, trade, and own assets with no hassles. DeFi Solutions For Ecommerce Streamline your ecommerce business with DeFi and its pragmatic tools. With DeFi's solutions, benefits like the omission of intermediaries, faster shipping, supply chain management, and real-time tracking can be integrated with your ecommerce business, increasing profits. DeFi Tokenization Development Tokenization development is one of the pragmatic solutions DeFi offers. Users can now convert inoperative and underutilized assets into great profits by simply tokenizing their assets. With our DeFi tokenization, avail of ERC20, ERC721, and NFT tokens for your assets. DeFi Crowdfunding Platform Development Although a relatively new sector, DeFi crowdfunding has become the go-to mode of aggregating funds to support businesses and start-ups. Our DeFi crowdfunding platform services come with additional benefits such as tax benefits, instant approval, fundraising calendars, and more. DeFi Real Estate Platform Development DeFi has revolutionized the ways of real estate management. Now real estate owners and investors, with the help of blockchain-based tokens, can make property investment seamless and manageable. With fractional ownership, financial inclusivity is now possible. DeFi ICO Development One of the leading fundraising methods, DeFi ICO services are distinguished.
Creating utility tokens, community management, escalating coin value, and launching projects with diligence and guidance from market analysts and blockchain experts are all included in our ICO development. DeFi Exchange Development Offering users a plethora of benefits, DEXs are the prized innovation of DeFi. Offering high-end security, durable liquidity, complete anonymity, and financial inclusivity, DEXs make trading and transacting crypto accessible and lucrative for crypto enthusiasts. DeFi Protocol Like Yearn.Finance Yearn.Finance offers the best APY the market has to offer by drawing on popular exchanges. This protocol offers its users the best yields in a highly secure network. With built-in smart contracts and open-source code, it supports a range of stablecoins offering huge returns. DeFi Protocol Like Aave The DeFi protocol Aave offers crypto traders a robust platform for lending and borrowing crypto, on which they earn high interest. The highlight features of Aave, flash loans and flexible interest rates, make it a profitable platform for crypto traders. DeFi Exchange Like 1inch The 1inch exchange now has the reputation of being the DEX offering users the lowest slippage. As an aggregator, 1inch connects several exchanges to one platform in a non-custodial ecosystem. With governance and farming features, trading on 1inch remains prominent.
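As a rough illustration of the staking-reward calculation mentioned under "DeFi Staking Platform Development" above, the sketch below assumes a simple pro-rata formula (your share of the staked pool times the network's annual issuance, scaled by the staking duration). The formula and names are assumptions for illustration only, not any specific protocol's reward schedule.

# Illustration only: a simple pro-rata staking-reward estimate based on the
# factors named above (staked amount, duration, issuance). Real protocols
# each define their own reward schedule.
def estimate_staking_reward(staked, total_staked, annual_issuance, days):
    pool_share = staked / total_staked        # your fraction of all staked assets
    yearly_reward = pool_share * annual_issuance
    return yearly_reward * (days / 365.0)     # scale by staking duration

# Example: 1,000 tokens staked out of 2,000,000 total, 100,000 tokens issued
# per year, held for 90 days -> roughly 12.3 tokens
print(estimate_staking_reward(1_000, 2_000_000, 100_000, 90))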
kukapay / Jupiter MCP: An MCP server for executing token swaps on the Solana blockchain using Jupiter's new Ultra API.
ELHARAKA / SnipeGenius: SnipeGenius is an advanced PancakeSwap sniping bot that detects new pairs, runs in-depth safety checks, and auto-executes trades with smart exit strategies.
jojo1317 / Git Quick Reference For Beginners: There are many good resources for learning Git. (Here's an excellent online book, and this is my video series introducing Git and GitHub.) But once you've learned the basics, it can be hard to remember which commands to use to execute the most common tasks. I went searching for a Git reference guide that would be useful for beginners like myself, but didn't find anything ideal: Git - the simple guide is useful as a high-level overview of the basic commands, but doesn't provide enough details. Git Cheatsheet uses a nice interactive approach to summarize a ton of git commands on one screen, but it doesn't give you any sense of workflow. Git Reference is close to what I was looking for, and links each entry to the relevant section of Pro Git (awesome!), but is too long for a quick reference. So, I decided to make my own reference guide! The guide below is organized by task, with an emphasis on basic tasks and common command line arguments. It begins with the workflow for cloning, updating, and syncing with a remote repo because that's a common way to get started with Git and GitHub. Note that this is only a reference guide, and will not teach you Git. It does not explain the difference between staged and committed, what to do with a .gitignore file, or when to create a branch. But if you are already familiar with those concepts, this guide will hopefully refresh your memory and help you to discover other commands you might need. Please enjoy, and let me know your thoughts or questions in the comments!
Cloning a remote repo (that you created or forked on GitHub)
git clone < your-repo-URL >: copies your remote repo to your local machine (in a subdirectory with the repo's name), and automatically creates an "origin" handle
git remote add upstream < forked-repo-URL >: adds an "upstream" handle for the repo you forked
git remote -v: shows the handles for your remotes
git remote show < handlename >: inspect a remote in detail
Tracking, committing, and pushing your changes
git add < name >: if untracked, start tracking a file or directory; if tracked and modified, stage it for committing
git reset HEAD < name >: unstage a changed file
git commit -m "message": commits everything that has been staged with a message
-a -m "message": automatically stages any modified files, then commits
--amend -m "new message": fixes the message from the last commit
git push origin master: pushes your commits to the master branch of the origin
Syncing your local repo with the upstream repo
git fetch upstream: fetch the upstream and store its master branch in "upstream/master"
git merge upstream/master: merge that branch into the working branch
Viewing the status of your files
git status: check which files have been modified and/or staged since the last commit
git diff: shows the diff for files that are modified but not staged
--staged: shows the diff for files that are staged but not committed
Viewing the commit history
git log: shows the detailed commit history
-1: only shows the last 1 commit
-p: shows the line diff for each commit
-p --word-diff: shows the word diff for each commit
--stat: shows stats instead of diff details
--name-status: shows a simpler version of stat
--oneline: just shows commit comments
gitk: open a visual commit browser
Managing branches
git branch: shows a list of local branches
< branchname >: create a new branch with that name
-d < branchname >: delete a branch
-v: show the last commit on each local branch
-a: show local and remote branches
-va: show the last commit on each local and remote branch
--merged: list which branches are already merged into the working branch (safe to delete)
--no-merged: list which branches are not merged into the working branch
git checkout < branchname >: switch the HEAD pointer to a different branch
-b < branchname >: create a new branch and switch to it
Removing, deleting, and reverting files
git rm < name >: deletes that file from the disk, then stages its deletion
--cached < name >: stops tracking a file, then stages its deletion (but does not delete it from the disk)
git mv < oldname > < newname >: renames the file on disk, then stages the deletion of the old name and addition of the new name
git checkout -- < name >: revert a modified file on disk back to the last committed version
Other basic commands
git init: initialize Git in an existing directory
git config --list: shows your Git configuration
touch .gitignore: create an empty .gitignore file
N30nHaCkZ / Linux: Linux kernel release 3.x <http://kernel.org/> These are the release notes for Linux version 3. Read them carefully, as they tell you what this is all about, explain how to install the kernel, and what to do if something goes wrong. WHAT IS LINUX? Linux is a clone of the operating system Unix, written from scratch by Linus Torvalds with assistance from a loosely-knit team of hackers across the Net. It aims towards POSIX and Single UNIX Specification compliance. It has all the features you would expect in a modern fully-fledged Unix, including true multitasking, virtual memory, shared libraries, demand loading, shared copy-on-write executables, proper memory management, and multistack networking including IPv4 and IPv6. It is distributed under the GNU General Public License - see the accompanying COPYING file for more details. ON WHAT HARDWARE DOES IT RUN? Although originally developed first for 32-bit x86-based PCs (386 or higher), today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell, IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS, Xtensa, Tilera TILE, AVR32 and Renesas M32R architectures. Linux is easily portable to most general-purpose 32- or 64-bit architectures as long as they have a paged memory management unit (PMMU) and a port of the GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has also been ported to a number of architectures without a PMMU, although functionality is then obviously somewhat limited. Linux has also been ported to itself. You can now run the kernel as a userspace application - this is called UserMode Linux (UML). DOCUMENTATION: - There is a lot of documentation available both in electronic form on the Internet and in books, both Linux-specific and pertaining to general UNIX questions. I'd recommend looking into the documentation subdirectories on any Linux FTP site for the LDP (Linux Documentation Project) books. This README is not meant to be documentation on the system: there are much better sources available. - There are various README files in the Documentation/ subdirectory: these typically contain kernel-specific installation notes for some drivers, for example. See Documentation/00-INDEX for a list of what is contained in each file. Please read the Changes file, as it contains information about problems that may result from upgrading your kernel. - The Documentation/DocBook/ subdirectory contains several guides for kernel developers and users. These guides can be rendered in a number of formats: PostScript (.ps), PDF, HTML, & man-pages, among others. After installation, "make psdocs", "make pdfdocs", "make htmldocs", or "make mandocs" will render the documentation in the requested format. INSTALLING the kernel source: - If you install the full sources, put the kernel tarball in a directory where you have permissions (e.g. your home directory) and unpack it: gzip -cd linux-3.X.tar.gz | tar xvf - or bzip2 -dc linux-3.X.tar.bz2 | tar xvf - Replace "X" with the version number of the latest kernel. Do NOT use the /usr/src/linux area! This area has a (usually incomplete) set of kernel headers that are used by the library header files. They should match the library, and not get messed up by whatever the kernel-du-jour happens to be. - You can also upgrade between 3.x releases by patching. Patches are distributed in the traditional gzip and the newer bzip2 format.
To install by patching, get all the newer patch files, enter the top level directory of the kernel source (linux-3.X) and execute: gzip -cd ../patch-3.x.gz | patch -p1 or bzip2 -dc ../patch-3.x.bz2 | patch -p1 Replace "x" for all versions bigger than the version "X" of your current source tree, _in_order_, and you should be ok. You may want to remove the backup files (some-file-name~ or some-file-name.orig), and make sure that there are no failed patches (some-file-name# or some-file-name.rej). If there are, either you or I have made a mistake. Unlike patches for the 3.x kernels, patches for the 3.x.y kernels (also known as the -stable kernels) are not incremental but instead apply directly to the base 3.x kernel. For example, if your base kernel is 3.0 and you want to apply the 3.0.3 patch, you must not first apply the 3.0.1 and 3.0.2 patches. Similarly, if you are running kernel version 3.0.2 and want to jump to 3.0.3, you must first reverse the 3.0.2 patch (that is, patch -R) _before_ applying the 3.0.3 patch. You can read more on this in Documentation/applying-patches.txt Alternatively, the script patch-kernel can be used to automate this process. It determines the current kernel version and applies any patches found. linux/scripts/patch-kernel linux The first argument in the command above is the location of the kernel source. Patches are applied from the current directory, but an alternative directory can be specified as the second argument. - Make sure you have no stale .o files and dependencies lying around: cd linux make mrproper You should now have the sources correctly installed. SOFTWARE REQUIREMENTS Compiling and running the 3.x kernels requires up-to-date versions of various software packages. Consult Documentation/Changes for the minimum version numbers required and how to get updates for these packages. Beware that using excessively old versions of these packages can cause indirect errors that are very difficult to track down, so don't assume that you can just update packages when obvious problems arise during build or operation. BUILD directory for the kernel: When compiling the kernel, all output files will per default be stored together with the kernel source code. Using the option "make O=output/dir" allows you to specify an alternate place for the output files (including .config). Example: kernel source code: /usr/src/linux-3.X build directory: /home/name/build/kernel To configure and build the kernel, use:
cd /usr/src/linux-3.X
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
Please note: If the 'O=output/dir' option is used, then it must be used for all invocations of make. CONFIGURING the kernel: Do not skip this step even if you are only upgrading one minor version. New configuration options are added in each release, and odd problems will turn up if the configuration files are not set up as expected. If you want to carry your existing configuration to a new version with minimal work, use "make oldconfig", which will only ask you for the answers to new questions. - Alternative configuration commands are: "make config" Plain text interface. "make menuconfig" Text based color menus, radiolists & dialogs. "make nconfig" Enhanced text based color menus. "make xconfig" X windows (Qt) based configuration tool. "make gconfig" X windows (Gtk) based configuration tool.
"make oldconfig" Default all questions based on the contents of your existing ./.config file and asking about new config symbols. "make silentoldconfig" Like above, but avoids cluttering the screen with questions already answered. Additionally updates the dependencies. "make olddefconfig" Like above, but sets new symbols to their default values without prompting. "make defconfig" Create a ./.config file by using the default symbol values from either arch/$ARCH/defconfig or arch/$ARCH/configs/${PLATFORM}_defconfig, depending on the architecture. "make ${PLATFORM}_defconfig" Create a ./.config file by using the default symbol values from arch/$ARCH/configs/${PLATFORM}_defconfig. Use "make help" to get a list of all available platforms of your architecture. "make allyesconfig" Create a ./.config file by setting symbol values to 'y' as much as possible. "make allmodconfig" Create a ./.config file by setting symbol values to 'm' as much as possible. "make allnoconfig" Create a ./.config file by setting symbol values to 'n' as much as possible. "make randconfig" Create a ./.config file by setting symbol values to random values. "make localmodconfig" Create a config based on current config and loaded modules (lsmod). Disables any module option that is not needed for the loaded modules. To create a localmodconfig for another machine, store the lsmod of that machine into a file and pass it in as a LSMOD parameter. target$ lsmod > /tmp/mylsmod target$ scp /tmp/mylsmod host:/tmp host$ make LSMOD=/tmp/mylsmod localmodconfig The above also works when cross compiling. "make localyesconfig" Similar to localmodconfig, except it will convert all module options to built in (=y) options. You can find more information on using the Linux kernel config tools in Documentation/kbuild/kconfig.txt. - NOTES on "make config": - Having unnecessary drivers will make the kernel bigger, and can under some circumstances lead to problems: probing for a nonexistent controller card may confuse your other controllers - Compiling the kernel with "Processor type" set higher than 386 will result in a kernel that does NOT work on a 386. The kernel will detect this on bootup, and give up. - A kernel with math-emulation compiled in will still use the coprocessor if one is present: the math emulation will just never get used in that case. The kernel will be slightly larger, but will work on different machines regardless of whether they have a math coprocessor or not. - The "kernel hacking" configuration details usually result in a bigger or slower kernel (or both), and can even make the kernel less stable by configuring some routines to actively try to break bad code to find kernel problems (kmalloc()). Thus you should probably answer 'n' to the questions for "development", "experimental", or "debugging" features. COMPILING the kernel: - Make sure you have at least gcc 3.2 available. For more information, refer to Documentation/Changes. Please note that you can still run a.out user programs with this kernel. - Do a "make" to create a compressed kernel image. It is also possible to do "make install" if you have lilo installed to suit the kernel makefiles, but you may want to check your particular lilo setup first. To do the actual install, you have to be root, but none of the normal build should require that. Don't take the name of root in vain. - If you configured any of the parts of the kernel as `modules', you will also have to do "make modules_install". 
- Verbose kernel compile/build output: Normally, the kernel build system runs in a fairly quiet mode (but not totally silent). However, sometimes you or other kernel developers need to see compile, link, or other commands exactly as they are executed. For this, use "verbose" build mode. This is done by inserting "V=1" in the "make" command. E.g.: make V=1 all To have the build system also tell the reason for the rebuild of each target, use "V=2". The default is "V=0". - Keep a backup kernel handy in case something goes wrong. This is especially true for the development releases, since each new release contains new code which has not been debugged. Make sure you keep a backup of the modules corresponding to that kernel, as well. If you are installing a new kernel with the same version number as your working kernel, make a backup of your modules directory before you do a "make modules_install". Alternatively, before compiling, use the kernel config option "LOCALVERSION" to append a unique suffix to the regular kernel version. LOCALVERSION can be set in the "General Setup" menu. - In order to boot your new kernel, you'll need to copy the kernel image (e.g. .../linux/arch/i386/boot/bzImage after compilation) to the place where your regular bootable kernel is found. - Booting a kernel directly from a floppy without the assistance of a bootloader such as LILO, is no longer supported. If you boot Linux from the hard drive, chances are you use LILO, which uses the kernel image as specified in the file /etc/lilo.conf. The kernel image file is usually /vmlinuz, /boot/vmlinuz, /bzImage or /boot/bzImage. To use the new kernel, save a copy of the old image and copy the new image over the old one. Then, you MUST RERUN LILO to update the loading map!! If you don't, you won't be able to boot the new kernel image. Reinstalling LILO is usually a matter of running /sbin/lilo. You may wish to edit /etc/lilo.conf to specify an entry for your old kernel image (say, /vmlinux.old) in case the new one does not work. See the LILO docs for more information. After reinstalling LILO, you should be all set. Shutdown the system, reboot, and enjoy! If you ever need to change the default root device, video mode, ramdisk size, etc. in the kernel image, use the 'rdev' program (or alternatively the LILO boot options when appropriate). No need to recompile the kernel to change these parameters. - Reboot with the new kernel and enjoy. IF SOMETHING GOES WRONG: - If you have problems that seem to be due to kernel bugs, please check the file MAINTAINERS to see if there is a particular person associated with the part of the kernel that you are having trouble with. If there isn't anyone listed there, then the second best thing is to mail them to me (torvalds@linux-foundation.org), and possibly to any other relevant mailing-list or to the newsgroup. - In all bug-reports, *please* tell what kernel you are talking about, how to duplicate the problem, and what your setup is (use your common sense). If the problem is new, tell me so, and if the problem is old, please try to tell me when you first noticed it. - If the bug results in a message like unable to handle kernel paging request at address C0000010 Oops: 0002 EIP: 0010:XXXXXXXX eax: xxxxxxxx ebx: xxxxxxxx ecx: xxxxxxxx edx: xxxxxxxx esi: xxxxxxxx edi: xxxxxxxx ebp: xxxxxxxx ds: xxxx es: xxxx fs: xxxx gs: xxxx Pid: xx, process nr: xx xx xx xx xx xx xx xx xx xx xx or similar kernel debugging information on your screen or in your system log, please duplicate it *exactly*. 
The dump may look incomprehensible to you, but it does contain information that may help debugging the problem. The text above the dump is also important: it tells something about why the kernel dumped code (in the above example, it's due to a bad kernel pointer). More information on making sense of the dump is in Documentation/oops-tracing.txt - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump as is, otherwise you will have to use the "ksymoops" program to make sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred). This utility can be downloaded from ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ . Alternatively, you can do the dump lookup by hand: - In debugging dumps like the above, it helps enormously if you can look up what the EIP value means. The hex value as such doesn't help me or anybody else very much: it will depend on your particular kernel setup. What you should do is take the hex value from the EIP line (ignore the "0010:"), and look it up in the kernel namelist to see which kernel function contains the offending address. To find out the kernel function name, you'll need to find the system binary associated with the kernel that exhibited the symptom. This is the file 'linux/vmlinux'. To extract the namelist and match it against the EIP from the kernel crash, do: nm vmlinux | sort | less This will give you a list of kernel addresses sorted in ascending order, from which it is simple to find the function that contains the offending address. Note that the address given by the kernel debugging messages will not necessarily match exactly with the function addresses (in fact, that is very unlikely), so you can't just 'grep' the list: the list will, however, give you the starting point of each kernel function, so by looking for the function that has a starting address lower than the one you are searching for but is followed by a function with a higher address you will find the one you want. In fact, it may be a good idea to include a bit of "context" in your problem report, giving a few lines around the interesting one. If you for some reason cannot do the above (you have a pre-compiled kernel image or similar), telling me as much about your setup as possible will help. Please read the REPORTING-BUGS document for details. - Alternatively, you can use gdb on a running kernel. (read-only; i.e. you cannot change values or set break points.) To do this, first compile the kernel with -g; edit arch/i386/Makefile appropriately, then do a "make clean". You'll also need to enable CONFIG_PROC_FS (via "make config"). After you've rebooted with the new kernel, do "gdb vmlinux /proc/kcore". You can now use all the usual gdb commands. The command to look up the point where your system crashed is "l *0xXXXXXXXX". (Replace the XXXes with the EIP value.) gdb'ing a non-running kernel currently fails because gdb (wrongly) disregards the starting offset for which the kernel is compiled.
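The by-hand symbol lookup described above (take the EIP from the oops, then find the kernel function whose start address is the closest one at or below it in the sorted nm output) is easy to script. Below is a minimal sketch, not part of the kernel documentation, assuming a symbol dump saved beforehand with "nm vmlinux > vmlinux.syms"; the file name and the example EIP value are placeholders.

import bisect

def load_symbols(path):
    # Parse `nm vmlinux` output into a sorted list of (address, name) for text (code) symbols.
    symbols = []
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) == 3 and parts[1].lower() == "t":
                symbols.append((int(parts[0], 16), parts[2]))
    symbols.sort()
    return symbols

def find_function(symbols, eip):
    # Return (name, offset) of the function whose start address is <= eip.
    addresses = [addr for addr, _ in symbols]
    i = bisect.bisect_right(addresses, eip) - 1
    if i < 0:
        return None
    addr, name = symbols[i]
    return name, eip - addr

syms = load_symbols("vmlinux.syms")      # produced with: nm vmlinux > vmlinux.syms
print(find_function(syms, 0xC01234AB))   # hypothetical EIP taken from the oops line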
ginking / Archimedes 1 Archimedes 1 is a sentiment-based trading bot, heavily influenced by existing bots that we forked, with a few enhancements here and there. It was built to understand how these bots work so that we could carry the ideas forward in our own way toward our own complete AI-based trading system (Archimedes 2.0). This bot watches [followed accounts] tweets and waits for them to mention any publicly traded companies. When they do, sentiment analysis is used to determine whether the opinions are positive or negative toward those companies. The bot then automatically executes trades on the relevant stocks according to the expected market reaction. The code is written in Python and is meant to run on a Google Compute Engine instance. It uses the Twitter Streaming APIs (the newer version) to get notified whenever tweets within its remit are of interest. The entity detection and sentiment analysis are done using Google's Cloud Natural Language API, and the Wikidata Query Service provides the company data. The TradeKing (ALLY) API does the stock trading (changed to ALLY). The main module defines a callback where incoming tweets are handled and starts streaming the user's feed: def twitter_callback(tweet): companies = analysis.find_companies(tweet) if companies: trading.make_trades(companies) twitter.tweet(companies, tweet) if __name__ == "__main__": twitter.start_streaming(twitter_callback) The core algorithms are implemented in the analysis and trading modules. The former finds mentions of companies in the text of the tweet, figures out what their ticker symbol is, and assigns a sentiment score to them. The latter chooses a trading strategy, which is either buy now and sell at close or sell short now and buy to cover at close (a minimal sketch of this decision appears at the end of this entry). The twitter module deals with streaming and tweeting out the summary. Follow these steps to run the code yourself: 1. Create VM instance Check out the quickstart to create a Cloud Platform project and a Linux VM instance with Compute Engine, then SSH into it for the steps below. The predefined machine type g1-small (1 vCPU, 1.7 GB memory) seems to work well. 2. Set up auth The authentication keys for the different APIs are read from shell environment variables. Each service has different steps to obtain them. Twitter Log in to your Twitter account and create a new application. Under the Keys and Access Tokens tab for your app you'll find the Consumer Key and Consumer Secret. Export both to environment variables: export TWITTER_CONSUMER_KEY="<YOUR_CONSUMER_KEY>" export TWITTER_CONSUMER_SECRET="<YOUR_CONSUMER_SECRET>" If you want the tweets to come from the same account that owns the application, simply use the Access Token and Access Token Secret on the same page. If you want to tweet from a different account, follow the steps to obtain an access token. Then export both to environment variables: export TWITTER_ACCESS_TOKEN="<YOUR_ACCESS_TOKEN>" export TWITTER_ACCESS_TOKEN_SECRET="<YOUR_ACCESS_TOKEN_SECRET>" Google Follow the Google Application Default Credentials instructions to create, download, and export a service account key. export GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials-file.json" You also need to enable the Cloud Natural Language API for your Google Cloud Platform project. TradeKing (ALLY) Log in to your TradeKing (ALLY) account and create a new application. Behind the Details button for your application you'll find the Consumer Key, Consumer Secret, OAuth (Access) Token, and OAuth (Access) Token Secret.
Export them all to environment variables: export TRADEKING_CONSUMER_KEY="<YOUR_CONSUMER_KEY>" export TRADEKING_CONSUMER_SECRET="<YOUR_CONSUMER_SECRET>" export TRADEKING_ACCESS_TOKEN="<YOUR_ACCESS_TOKEN>" export TRADEKING_ACCESS_TOKEN_SECRET="<YOUR_ACCESS_TOKEN_SECRET>" Also export your TradeKing (ALLY) account number, which you'll find under My Accounts: export TRADEKING_ACCOUNT_NUMBER="<YOUR_ACCOUNT_NUMBER>" 3. Install dependencies There are a few library dependencies, which you can install using pip: $ pip install -r requirements.txt 4. Run the tests Verify that everything is working as intended by running the tests with pytest using this command: $ export USE_REAL_MONEY=NO && pytest *.py --verbose 5. Run the benchmark The benchmark report shows how the current implementation of the analysis and trading algorithms would have performed against historical data. You can run it again to benchmark any changes you may have made: $ ./benchmark.py > benchmark.md 6. Start the bot Enable real orders that use your money: $ export USE_REAL_MONEY=YES Have the code start running in the background with this command: $ nohup ./main.py & License: Archimedes (edits under Invacio); the original framework is by Max Braun and is licensed under the Apache V2 License. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
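The trading strategy described above (positive sentiment: buy now and sell at close; negative sentiment: sell short now and buy to cover at close) can be summarized in a few lines. The sketch below is illustrative only and is not the repository's actual trading code; the dictionary fields and ticker are made up.

def choose_strategy(company):
    # company is a dict such as {"name": ..., "ticker": ..., "sentiment": float}
    if company["sentiment"] > 0:
        return {"ticker": company["ticker"], "action": "buy_now_sell_at_close"}
    if company["sentiment"] < 0:
        return {"ticker": company["ticker"], "action": "short_now_cover_at_close"}
    return None  # neutral mentions are ignored

print(choose_strategy({"name": "Example Corp", "ticker": "XMPL", "sentiment": 0.6}))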
tangcr / Redis What is Redis? Redis is a NoSQL store. There are many kinds of NoSQL databases: column stores such as HBase and Cassandra; document stores such as MongoDB (the most recommended); key-value stores such as Berkeley DB, MemcacheDB, and Redis, with Redis being the strongest of them; graph stores, rarely needed here, such as Neo4j and Versant; and XML stores such as Berkeley DB XML and XBASE (Oracle has supported this storage model for a long time). Knowing these NoSQL names by itself is useless; what really matters, and what we need to master, is knowing which kind of NoSQL to choose for which scenario. Since this series is about Redis, what can Redis actually do? Basic Redis use cases: session sharing between web applications, i.e. multiple WAR projects sharing one session; and distributed caching. Because Redis is a key-value store and provides rich adapters for C, .NET, and Java clients, it is useful for exchanging data between heterogeneous platforms and can serve as the distributed cache of a large system, and its SETNX-based locks are often used in e-commerce scenarios such as flash sales and red-envelope grabs. Installing Redis. I originally wanted to write about the "pitfalls" of Redis here, but I decided to leave that for a later chapter, since it is more natural to get a feel for things hands-on before turning to theory, so let us start by building a Redis environment. As usual, the bold red parts are important. Always deploy Redis on Linux. Do not take the shortcut of using the "Redis 2.8.1 for Windows" build; if you do, you will not be able to keep up with this series. Redis is built with GCC and is native to Linux/Unix; the Windows build is a cut-down, unofficial version with no official endorsement. Start with Docker first. If you already have a Linux/Unix environment you can skip this chapter. Things get serious here, because we are genuinely stepping onto the path of SOA, PaaS, and the internet. For users without a Linux/Unix environment I recommend Docker, installed via the Windows version of boot2docker, which downloads as a single installer file. Before installing, untick the IPv6 protocol in your network connection settings. Double-click the installer and remember to select the VirtualBox option during installation: Docker is a Linux/Unix technology, so to use Docker on Windows, boot2docker embeds VirtualBox to virtualize the Docker environment. After installation a blue icon appears on your desktop; double-clicking it opens a command-line window with green text on a black background, like something out of The Matrix. That is Docker. Once installed, run: docker@boot2docker:~$ docker run hello-world If you see the following output: Hello from Docker. This message shows that your installation appears to be working correctly. To generate this message, Docker took the following steps: 1. The Docker client contacted the Docker daemon. 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. (Assuming it was not already locally available.) 3. The Docker daemon created a new container from that image which runs the executable that produces the output you are currently reading. 4. The Docker daemon streamed that output to the Docker client, which sent it to your terminal.
To try something more ambitious, you can run an Ubuntu container with: $ docker run -it ubuntu bash For more examples and ideas, visit: http://docs.docker.com/userguide/ then your Docker installation has succeeded. Installing a Unix environment inside Docker. Now that we have Docker, let us virtualize an Ubuntu (Unix) environment with it; here we use Ubuntu 14. Download the Ubuntu 14 package (link: download the Ubuntu14 package), then import it into Docker with the following command: cat ubuntu-14.04-x86_64.tar.gz | docker import - ubuntu:ubuntu14 This finishes quickly; afterwards list your images. Ubuntu has been imported successfully, so we can now run our own Ubuntu inside Docker: docker run -i -t ubuntu:ubuntu14 /bin/bash This drops you into that Ubuntu's bash environment. Note: if the command above fails, try this one instead (two slashes): docker run -i -t ubuntu:ubuntu14 //bin/bash If you see a prompt like root@ubuntu14_, your Ubuntu 14 is installed; next we will install and deploy Redis inside this docker -> ubuntu14, exactly as on a normal Linux machine. First install SSHD in Ubuntu 14 so that we can manage its file system with an SFTP tool such as WinSCP. Installing SSHD in Ubuntu 14. Step 1: docker run -t -i ubuntu/mk:v1 /bin/bash enters the Ubuntu environment; ubuntu/mk here is the name of the Ubuntu 14 container on my machine, so if you followed the steps above, replace it with ubuntu:ubuntu14. Step 2: update apt-get, which is a command-line download tool; if you do not update it, its sources and index stay stale, so run: apt-get update This is quick (depending on your network). Step 3: download and install the OpenSSH components: apt-get install openssh-server openssh-client Step 4: change your root password: passwd Type your root password twice (mine is six lowercase a's). Step 5: exit the container and commit the changes above. If you exit the container and then quit Docker or shut down without committing, the previous four steps are all lost; you must commit for them to take effect. First find the ID of the container you just ran: docker ps -a shows your latest container's ID, a hex-like string such as 1edfb9aabde8890. With that container ID we can commit the OpenSSH environment we just installed: docker commit 1edfb9aabde8890 ubuntu:ssh Step 6: run the OpenSSH-enabled Ubuntu 14 so we can connect to it with an SFTP tool such as WinSCP. Enter the following commands in order: docker kill $(docker ps -q) kills the processes of all running containers, and docker rm $(docker ps -a -q) removes all containers; together these two steps are a "Docker spring cleaning". Docker works like this: it can run multiple containers, each built from images; to delete an image you must first stop the container using it and then remove the image, which is why we run the two commands above. Next check which images we currently have: docker images You will see an image list containing our ubuntu:14, our ubuntu:ssh, and hello-world. Let us delete the ubuntu:14 image (to keep things clean). Every image has its own image ID; after finding it with docker images, you can delete an unused image with: docker rmi imageid Next we start our ubuntu14:ssh with the following command: docker run -d -p 122:22 ubuntu:ssh //usr/sbin/sshd -D This command means: -d starts the image as a background daemon process, so it does not exit the moment you exit, as it would with -t; -p is port mapping, which deserves an explanation of Docker's port-mapping mechanism. After installation, Docker uses VirtualBox's host-only NAT mechanism to create a virtual IP, which you can look up in VirtualBox under the Global Settings -> Network menu. Once the boot2docker environment is running, its address is in the 192.168.56.* range, usually 192.168.56.101, and you can connect to the Docker host directly with WinSCP once boot2docker has started: address 192.168.56.101, port 22, user name docker, password tcuser (these are the defaults; use whatever VirtualBox assigned when boot2docker was installed). Inside this Docker host we installed an ubuntu14:ssh image and started it as a background process, so it also has its own IP (perhaps in the 172 or the 169 range); the exact address is generally different each time the image starts (in a cluster you can bind a dynamic DNS name to the container IP so the name stays constant). SSH connects over TCP port 22, so we map port 22 of the Ubuntu 14 container to port 122 on our Docker host 192.168.56.101. The argument //usr/sbin/sshd -D is the entrypoint, i.e. the command the image runs once it starts; the final -D (capital D) keeps sshd in the foreground so the container stays up. Once the command is issued and the image starts (you get back a container ID), you can open WinSCP and connect with: address 192.168.56.101, port 122
(This is 122, not 22, because we mapped the image's port 22 to port 122 on the Docker host 192.168.56.101.) User name: root, password: aaaaaa, and you can connect to our Ubuntu 14 environment. If you also have PuTTY installed you can use PuTTY together with WinSCP to get straight into Ubuntu 14's command line, and with that you have an Ubuntu 14 test environment. Installing Redis on Ubuntu 14. Many of the online tutorials for installing Redis on Ubuntu 14 are wrong and will lead you astray; here is why, please read to the end. Those tutorials set up Redis by running apt-get update and then installing with something like wget https://github.com/ijonas/dotfiles/raw/master/etc/init.d/redis-server. That is certainly convenient, but precisely because it is convenient, the Redis you get is not the latest version, usually 2.8.x or a Redis 3.0 RC, depending on the repository your Unix/Linux wget points at. Redis is written in C, and versions 2.4-2.8 are unstable, missing features, or buggy; if you run such a Redis as the production cache of a real website, those bugs will leave it unable to cope with surging concurrency, and after running for a while your production environment will come under enormous pressure. Also, because such a Redis is neither new nor stable enough: before Redis 3, Redis did not support clustering or master/standby high-availability out of the box, so you had to fall back on patchwork system-level tools such as Linux/Unix keepalived or HAProxy, write piles of complex scripts to maintain your Redis cluster, and use external means (Linux/Unix shell scripts) to keep the cached data of multiple Redis nodes in sync... which does not fit a website that needs to scale out, grow incrementally, be operable, and face a huge user base (tens of thousands of concurrent requests, up to millions of users, as at Sina Weibo or WeChat). I therefore recommend the installation process I use below: download the source package and compile it on the spot against your own Linux/Unix system. Step 1: download the currently most stable and most feature-complete Redis, the version with the best cluster support and the new Sentinel (high-availability) feature, i.e. Redis 3.0.7, the redis-stable release. Use the official Redis download link; it is the redis-stable.tar.gz package, the newest stable version at the time of writing, with a large number of bug fixes and feature improvements. Step 2: after downloading, upload the package into the Ubuntu 14 inside Docker and put it in the /opt directory, then unpack it with tar -zxvf redis-stable.tar.gz. Unpacking creates a redis-stable directory; enter it with cd redis-stable. Hold on, we will compile and install it in a moment. Step 3: compile and install Redis. First run gcc -v to check your GCC version. If it is below 4.2 you will hit a flood of errors when compiling Redis 3.0.7; as mentioned, Redis is built with GCC, and the latest Redis needs GCC 4.2-5 to compile, whereas the GCC on a Linux/Unix box installed last year or earlier is usually below 4.0 or even a 3.x version. Upgrade GCC first: apt-get install build-essential This is why apt-get update matters; otherwise the GCC you get will not be current either. My GCC is currently 5.3.1, upgraded just this week. After the upgrade we compile Redis 3.0.7; in the redis-stable directory type the following command: make PREFIX=/usr/local/redis1 install This tells GCC to compile redis-stable and install it into /usr/local/redis1 at the same time. The process is fast, perhaps only 10 seconds (depending on your machine; a PC with >= 8 GB RAM and a 4-core CPU is recommended), and you will then see everything ok. Go into /usr/local/redis1 and you will find the freshly installed Redis 3.0.7 stable. Enter the Redis directory: cd /usr/local/redis1/bin From this directory we could run our Redis server, but not so fast: before starting it we need to configure Redis. This blog is aimed at "full-stack" engineers; becoming an architect is only one starting point on the way to being full-stack, and if you cannot set up an environment you cannot get your hands on the newest technology, which is the main reason many programmers find, after five or seven years of work, that all they know is SSH. Redis 3 configuration essentials. Use WinSCP over port 122 to connect to the Ubuntu 14 inside Docker and configure Redis. The file we need to edit is /usr/local/redis1/bin/redis.conf: daemonize yes # When running daemonized, Redis writes a pid file in /var/run/redis.pid by # default. You can specify a custom pid file location here. pidfile "/var/run/redis/redis1.pid" # Accept connections on the specified port, default is 6379. # If port 0 is specified Redis will not listen on a TCP socket.
port 7001 We set daemonize to yes so that Redis runs as a background process, in what you can think of as "server" mode. When Redis runs in server mode it writes a pid file, so we point it at the /var/run/redis directory and name it redis1.pid; for this you need to create the redis directory under /var/run. We set the port to 7001 so it is easy to identify: later we will build a Redis cluster, so our instances will be redis1, redis2, redis3 with ports 7001, 7002, 7003, and so on. Many readers will ask, "why not name them master, slave1, slave2?" The reason is simple: whether it is today's Hadoop or ZooKeeper, clusters span machine rooms, and multiple masters also back each other up in master-slave fashion. Large websites do not have just one IDC machine room; they usually have two or three, or "cross-rack" deployments within one room, to form very large clusters, just like Alibaba's Taobao, which even has machine rooms in North America. If you need one IDC room locally and another in North America, you do not want to put the master in China and the slave in the US; instead, multiple sites and even multiple racks all have masters, and when one master goes down the cluster uses an "election strategy" to choose a node as the current group's master. That is why we name them redis1, redis2, redis3, and so on. Next, in the original settings: save 900 1 save 300 10 save 60 10000 comment out the 300 10 and 60 10000 lines. These settings mean Redis snapshots the cache to a .rdb file on disk if at least 1 write happened within 900 seconds, at least 10 writes within 300 seconds, or at least 10,000 writes within 60 seconds, much like ehcache or memcache, so that on restart Redis can recover the data it held before shutdown from the local persistence file. With the defaults, all three rules take effect in turn, and in a highly concurrent environment such a write policy has a huge impact on performance, so here we keep only the 900-second rule. Some will ask: what if data is lost this way? Do not worry, we will answer that later; it touches on the "correct" use of Redis. If Redis is only a cache, losing a few minutes of cached data, with the application falling back to the database in the meantime, is not a big problem. Guaranteeing both data completeness and performance is a contradiction, unless you have money to burn, in which case I can give you a money-burning configuration that even Sina would not pay for. dbfilename: here we keep Redis's original name for the cache file on disk. dir "/usr/local/redis1/data" is the directory holding the rdb file. Note that one of these settings may only contain a file name and the other may only contain a directory name, and we need to create the data directory under /usr/local/redis1. Set appendonly to no, which turns off Redis's AOF feature. AOF persistence records every write command the server executes and replays those commands at startup to rebuild the dataset. AOF is a synchronization strategy Redis uses in cluster or high-availability environments; it continuously appends the cached data from one node to another, and its guarantee of data completeness is higher than RDB mode. RDB is a very compact file holding a snapshot of the Redis dataset at a point in time. Such files are ideal for backups: for example, you can back up an RDB file every hour for the last 24 hours and keep one per day of the month, so that even if something goes wrong you can restore the dataset to any of those versions. RDB is very well suited to disaster recovery: it is a single compact file that can (after encryption) be shipped to another data center, much like Alibaba transferring MySQL binlogs between remote machine rooms over FTP. Officially, with AOF enabled, a failure in a Redis high-availability environment loses at most about 2 seconds of the customer's history, but the price is heavy I/O. Some developers who insist on 100% correct cache contents find the whole environment freezing up when AOF flushes too often, together with the nightmarish "Asynchronous AOF fsync is taking too long" warning; Redis is single-threaded, and its I/O used to block everything, including logins. That is frightening, but this issue has been optimized in the latest Redis, which uses a separate process for AOF rewriting and also improves RDB persistence, which is another reason I insist you use the newest stable Redis. When both RDB and AOF are enabled, and the RDB data is not fully up to date, the server will only load the AOF file on restart. Since the RDB file is only a backup, it is advisable to persist RDB only on the slave, and snapshotting once every 15 minutes is enough, which is why I keep only the save 900 1 rule. If you enable AOF: the benefit is that even in the worst case you lose no more than about two seconds of data, and the startup script is simpler, loading only its own AOF file. The cost is continuous I/O, plus the near-unavoidable stall when an AOF rewrite writes the data produced during the rewrite to the new file at the end. As long as the disks allow, reduce the AOF rewrite frequency as much as possible: the default rewrite base size of 64 MB is far too small and can be set to 5 GB or more, and the default of rewriting once the file grows 100% over its base size can also be set to a more suitable value. If you do not enable AOF: relying on master-slave replication alone for high availability also works. You save a great deal of I/O and avoid the jitter that rewrites bring. The cost is that if master and slave go down at the same time (at which point your website is basically down anyway), you may lose ten or twenty minutes of data, and the startup script has to compare the RDB files of master and slave and load the newer one. Sina Weibo chose this architecture. Finally, do not forget to set a Redis log file; here we put it in the /var/log/redis directory, so we need to create a redis directory under /var/log. Good, after saving the file we start our Redis server with the redis-server binary in /usr/local/redis1/bin, pointing it at this redis.conf. Then install the Windows version of Redis 2.8.1 on your Windows machine (used only as the Redis client) and run from Windows: redis-cli -p 7001 -h 192.168.56.101 Hmm, no response; it cannot connect, ha ha. Of course it cannot: when we started Ubuntu 14 with docker run -d -p 122:22 ubuntu:ssh //usr/sbin/sshd -D, we never mapped the Redis service's port 7001 onto 192.168.56.101, so how could a Windows host (whose IP might be 169.188.xx.xx) reach a process inside Docker? So first commit all the changes we have made as a new image, say redis:basic. Then do another Docker clean-up and start the redis:basic image with the following command: docker run -d -p 122:22 -p 7001:7001 redis:basic //usr/sbin/sshd -D As you can see, -p can be passed multiple times to map multiple ports of a Docker container (under the hood this uses the iptables command). Good: connect to this image's process with PuTTY, start the Redis service, and then connect with the redis-cli command from Windows. If on Linux it still does not connect (which can happen), it is because you have not disabled the Linux firewall; you can flush it with iptables -F, or edit the file with vi /etc/selinux/config and comment out the line SELINUX=enforcing with "#"
and add the line: SELINUX=disabled With that, Linux will no longer bother you with iptables after each boot (do this only on a local machine; in production, add proper iptables rules to allow access to the Redis service port instead). Once you see the PONG below, your Redis service is reachable over the network. Next we start connecting to our Redis service from a Java client. Using Spring Data + Jedis to connect to the Redis service. Spring + Session + Redis. pom.xml: here we need Spring Data and Jedis; the relevant Maven configuration is given below. <dependencies> <!-- poi start --> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi_version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml-schemas</artifactId> <version>${poi_version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-scratchpad</artifactId> <version>${poi_version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi_version}</version> </dependency> <!-- poi end --> <!-- active mq start --> <dependency> <groupId>org.apache.activemq</groupId> <artifactId>activemq-all</artifactId> <version>5.8.0</version> </dependency> <dependency> <groupId>org.apache.activemq</groupId> <artifactId>activemq-pool</artifactId> <version>${activemq_version}</version> </dependency> <dependency> <groupId>org.apache.xbean</groupId> <artifactId>xbean-spring</artifactId> <version>3.16</version> </dependency> <!-- active mq end --> <!-- servlet start --> <dependency> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${javax.servlet-api.version}</version> <scope>provided</scope> </dependency> <dependency> <groupId>javax.servlet.jsp</groupId> <artifactId>jsp-api</artifactId> <version>2.1</version> <scope>provided</scope> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>jstl</artifactId> <version>1.2</version> </dependency> <!-- servlet end --> <!-- redis start --> <dependency> <groupId>redis.clients</groupId> <artifactId>jedis</artifactId> <version>2.7.2</version> </dependency> <dependency> <groupId>org.redisson</groupId> <artifactId>redisson</artifactId> <version>1.0.2</version> </dependency> <!-- redis end --> <dependency> <groupId>org.slf4j</groupId> <artifactId>jcl-over-slf4j</artifactId> <version>${slf4j.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.version}</version> </dependency> <!-- spring conf start --> <dependency> <groupId>org.springframework.data</groupId> <artifactId>spring-data-redis</artifactId> <version>1.6.2.RELEASE</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-webmvc</artifactId> <version>${spring.version}</version> <exclusions> <exclusion> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-aop</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context-support</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework.data</groupId> <artifactId>spring-data-redis</artifactId> <version>1.6.2.RELEASE</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-orm</artifactId> <version>${spring.version}</version> </dependency> <dependency>
<groupId>org.springframework</groupId> <artifactId>spring-jms</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework.session</groupId> <artifactId>spring-session</artifactId> <version>${spring.session.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-core</artifactId> <version>${spring.version}</version> </dependency> <!-- spring conf end --> </dependencies> redis-config.xml <?xml version="1.0" encoding="UTF-8"?> <beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:p="http://www.springframework.org/schema/p" xmlns:context="http://www.springframework.org/schema/context" xmlns:jee="http://www.springframework.org/schema/jee" xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop" xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context.xsd"> <context:property-placeholder location="classpath:/spring/redis.properties" /> <context:component-scan base-package="org.sky.redis"> </context:component-scan> <bean id="jedisConnectionFactory" class="org.springframework.data.redis.connection.jedis.JedisConnectionFactory"> <property name="hostName" value="${redis.host.ip}" /> <property name="port" value="${redis.host.port}" /> <property name="poolConfig" ref="jedisPoolConfig" /> </bean> <bean id="jedisPoolConfig" class="redis.clients.jedis.JedisPoolConfig"> <property name="maxTotal" value="${redis.maxTotal}" /> <property name="maxIdle" value="${redis.maxIdle}" /> <property name="maxWaitMillis" value="${redis.maxWait}" /> <property name="testOnBorrow" value="${redis.testOnBorrow}" /> <property name="testOnReturn" value="${redis.testOnReturn}" /> </bean> <bean id="redisTemplate" class="org.springframework.data.redis.core.StringRedisTemplate"> <property name="connectionFactory" ref="jedisConnectionFactory" /> </bean> <!-- store the session in redis --> <bean id="redisHttpSessionConfiguration" class="org.springframework.session.data.redis.config.annotation.web.http.RedisHttpSessionConfiguration"> <property name="maxInactiveIntervalInSeconds" value="1800" /> </bean> <bean id="customExceptionHandler" class="sample.MyHandlerExceptionResolver" /> </beans> redis.properties redis.host.ip=192.168.0.101 redis.host.port=6379 redis.maxTotal=1000 redis.maxIdle=100 redis.maxWait=2000 redis.testOnBorrow=false redis.testOnReturn=true web.xml <?xml version="1.0" encoding="UTF-8"?> <web-app version="2.5" xmlns="http://java.sun.com/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"> <!-- - Location of the XML file that defines the root application context - Applied by ContextLoaderListener.
--> <!-- tag::context-param[] --> <context-param> <param-name>contextConfigLocation</param-name> <param-value> classpath:/spring/redis-conf.xml </param-value> </context-param> <!-- end::context-param[] --> <!-- tag::springSessionRepositoryFilter[] --> <filter> <filter-name>springSessionRepositoryFilter</filter-name> <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class> </filter> <filter-mapping> <filter-name>springSessionRepositoryFilter</filter-name> <url-pattern>/*</url-pattern> </filter-mapping> <session-config> <session-timeout>30</session-timeout> </session-config> <!-- end::springSessionRepositoryFilter[] --> <filter> <filter-name>encodingFilter</filter-name> <filter-class>org.springframework.web.filter.CharacterEncodingFilter</filter-class> <init-param> <param-name>encoding</param-name> <param-value>UTF-8</param-value> </init-param> <init-param> <param-name>forceEncoding</param-name> <param-value>true</param-value> </init-param> </filter> <filter-mapping> <filter-name>encodingFilter</filter-name> <url-pattern>/*</url-pattern> </filter-mapping> <servlet> <servlet-name>dispatcher</servlet-name> <servlet-class>org.springframework.web.servlet.DispatcherServlet</servlet-class> <init-param> <param-name>contextConfigLocation</param-name> <param-value>classpath:/spring/spring-mvc.xml</param-value> </init-param> <load-on-startup>1</load-on-startup> </servlet> <servlet-mapping> <servlet-name>dispatcher</servlet-name> <url-pattern>/</url-pattern> </servlet-mapping> <!-- - Loads the root application context of this web app at startup. - The application context is then available via - WebApplicationContextUtils.getWebApplicationContext(servletContext). --> <!-- tag::listeners[] --> <listener> <listener-class>org.springframework.web.context.ContextLoaderListener</listener-class> </listener> <!-- end::listeners[] --> <servlet> <servlet-name>sessionServlet</servlet-name> <servlet-class>sample.SessionServlet</servlet-class> </servlet> <servlet-mapping> <servlet-name>sessionServlet</servlet-name> <url-pattern>/servlet/session</url-pattern> </servlet-mapping> <welcome-file-list> <welcome-file>index.jsp</welcome-file> </welcome-file-list> </web-app> The key part here is this block: <filter> <filter-name>springSessionRepositoryFilter</filter-name> <filter-class>org.springframework.web.filter.DelegatingFilterProxy</filter-class> </filter> <filter-mapping> <filter-name>springSessionRepositoryFilter</filter-name> <url-pattern>/*</url-pattern> </filter-mapping> <session-config> <session-timeout>30</session-timeout> </session-config> This filter must be declared before all other filters. SessionController package sample; import org.springframework.session.data.redis.config.annotation.web.http.EnableRedisHttpSession; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.RequestMapping; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; /** * Created by mk on 15/1/7.
*/ @Controller @EnableRedisHttpSession public class SessionController { @RequestMapping("/mySession") public String index(final Model model, final HttpServletRequest request) { if (request.getSession().getAttribute("testSession") == null) { System.out.println("session is null"); request.getSession().setAttribute("testSession", "yeah"); } else { System.out.println("not null"); } return "showSession"; } } The showSession.jsp file: <%@ page language="java" contentType="text/html; charset=utf-8" pageEncoding="utf-8"%> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>showSession</title> </head> <body> <% String sessionValue=(String)session.getAttribute("testSession"); %> <h1>Session Value From Servlet is: <%=sessionValue%></h1> </body> </html> Testing: make sure the redis-server is running, then start this web project and visit the controller at http://localhost:8080/webpoc/mySession. Now connect with a Redis client tool to check whether the Spring session has made it into Redis. After connecting, run keys * in the Redis console to list the stored keys; look, Spring's session has been stored in Redis. Back in the Eclipse console, since this is our first visit to the controller the session is null, so it prints accordingly. Visit the controller again in IE: the session already exists in Redis, so when the user hits the controller again within 1800 seconds (30 minutes) it reads the value of the session key testSession, and the Eclipse console prints not null. SpringRedisTemplate + Redis. Having covered Spring Session + Redis, let us use the redisTemplate provided by the Spring Data framework to access the Redis service. Honestly, Spring is remarkably powerful; it can integrate with anything: Cassandra, JMS, JDBC... JPA... and so on. Spring integrating Barack Hussein Obama? LOL :) pom.xml: no need to list it again, it is above. redis-conf.xml: no need to list it, it is above. web.xml: also listed above. SentinelController.java: let us use this name for now, since later we will use it for the high-availability (HA) cluster test of our Redis Sentinel. package sample; import java.util.HashMap; import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.ApplicationContext; import org.springframework.context.support.ClassPathXmlApplicationContext; import org.springframework.data.redis.core.BoundHashOperations; import org.springframework.data.redis.core.StringRedisTemplate; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.bind.annotation.RequestMapping; import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisSentinelPool; import util.CountCreater; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; /** * Created by xin on 15/1/7.
*/ @Controller public class SentinelController { @Autowired private StringRedisTemplate redisTemplate; @RequestMapping("/sentinelTest") public String sentinelTest(final Model model, final HttpServletRequest request, final String action) { return "sentinelTest"; } @ExceptionHandler(value = { java.lang.Exception.class }) @RequestMapping("/setValueToRedis") public String setValueToRedis(final Model model, final HttpServletRequest request, final String action) throws Exception { CountCreater.setCount(); String key = String.valueOf(CountCreater.getCount()); Map mapValue = new HashMap(); for (int i = 0; i < 1000; i++) { mapValue.put(String.valueOf(i), String.valueOf(i)); } try { BoundHashOperations<String, String, String> boundHashOperations = redisTemplate.boundHashOps(key); boundHashOperations.putAll(mapValue); System.out.println("put key into redis"); } catch (Exception e) { e.printStackTrace(); throw new Exception(e); } return "sentinelTest"; } } Open IE and enter http://localhost:8080/webpoc/setValueToRedis, then watch the backend console, and finally connect with a Redis client to have a look. See... the entry with key=1 is the value we stored through Spring's redisTemplate, i.e. the value written by this piece of code: for (int i = 0; i < 1000; i++) { mapValue.put(String.valueOf(i), String.valueOf(i)); } try { BoundHashOperations<String, String, String> boundHashOperations = redisTemplate.boundHashOps(key); boundHashOperations.putAll(mapValue); If you want to store something simple such as key=test value=hello, you can use your redisTemplate like this: redisTemplate.execute(new RedisCallback<Object>() { @Override public Object doInRedis(RedisConnection connection) throws DataAccessException { connection.set( redisTemplate.getStringSerializer().serialize("test"), redisTemplate.getStringSerializer().serialize("hello")); return null; } }); Convenient, isn't it? That concludes the first day of this tutorial; tomorrow we start building a Redis cluster.
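For readers not working in Java, the same port-mapping check that redis-cli performs earlier in this entry can be run from any machine with a short Python script. This uses the third-party redis-py package and is an addition to the tutorial, not part of it; adjust the host and port to your own Docker mapping.

import redis

r = redis.Redis(host="192.168.56.101", port=7001)  # Docker host and the mapped Redis port
print(r.ping())        # True means the server answered PONG
r.set("test", "hello")
print(r.get("test"))   # b'hello'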
yousseftfifha / Groundwater Management Under Climate Change This project aims to study the impact of climate change on the groundwater level in the Mornag plain in Tunisia. Indeed, in the last few decades, aquifers all over the world have experienced notable water level variability due to the spatiotemporal variability of rainfall and temperature. Therefore, for reliable groundwater management in a climate change context, it is mandatory to analyze and estimate this level variability. In this study, we focus on the plain of Mornag, located in the southeast of Tunisia, since it represents 33% of the national agricultural production. From this plain, we have collected historical piezometric and pluviometric data covering the period 2005-2017. Given the pluviometric data, our goal is to predict the piezometric data. This issue has already been studied using classical numerical groundwater models such as Modflow and Feflow. Besides giving unsatisfactory results, these techniques are data- and time-consuming. To overcome these drawbacks, we propose to use two Artificial Intelligence (AI) approaches: the Extreme Gradient Boosting (XGBoost) approach, which has shown strong performance in the literature, and a widely used approach in this context, the Long Short-Term Memory (LSTM) neural network. For better results, we have added supplementary features to our dataset, such as the cluster zone (zones with the same characteristics) and the Standardized Precipitation Index (SPI), which can identify drought at different time scales. Both approaches have been executed entirely on GPU for time acceleration. Compared with traditional existing methods, they have both shown a high level of accuracy, which confirms their adequacy for groundwater level forecasting. The proposed prediction models will be used to evaluate the repercussions of climate change on groundwater levels under the RCP 4.5 and RCP 8.5 scenarios for the period 2017-2090, assessed over three future periods: 2017-2040 (short term), 2041-2065 (medium term), and 2066-2090 (long term). The analysis of these future results using AI will serve as a new Decision Support System used to optimize the management of our limited resources in order to satisfy the needs of the population in terms of drinking water and agricultural production.
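A minimal sketch of the XGBoost approach described above, predicting the piezometric level from rainfall, SPI, and cluster-zone features. The CSV file name and column names are assumptions for illustration only, not the project's actual data layout.

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor

df = pd.read_csv("mornag_2005_2017.csv")          # hypothetical merged dataset
X = df[["rainfall", "spi", "cluster_zone"]]       # assumed feature columns
y = df["piezometric_level"]                       # assumed target column

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# tree_method="hist"; a GPU build could use "gpu_hist", matching the project's GPU execution.
model = XGBRegressor(n_estimators=500, learning_rate=0.05, tree_method="hist")
model.fit(X_train, y_train)

pred = model.predict(X_test)
print("RMSE:", mean_squared_error(y_test, pred) ** 0.5)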
abhilash-1 / Pyspark Project This is the first project where we worked on Apache Spark. We downloaded the loan, customer credit card, and transactions datasets from Kaggle, which everyone is familiar with. After downloading the datasets we cleaned the data, and then, using tools and technologies such as Spark, HDFS, Hive, and many more, we executed new use cases on the datasets we had downloaded from Kaggle. As we all know, Apache Spark is a framework that can quickly process very large datasets.
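A minimal sketch of the workflow described above: load the Kaggle loan, credit card, and transactions datasets into Spark, clean them, and run a simple use case. The file names and column names are assumptions for illustration only.

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.appName("pyspark-project").getOrCreate()

loans = spark.read.csv("loan.csv", header=True, inferSchema=True)
txns = spark.read.csv("transactions.csv", header=True, inferSchema=True)

# Basic cleaning: drop duplicate rows and rows missing key fields.
loans_clean = loans.dropDuplicates().dropna(subset=["customer_id"])
txns_clean = txns.dropDuplicates().dropna(subset=["customer_id", "amount"])

# Example use case: total transaction amount per customer who holds a loan.
result = (txns_clean.join(loans_clean.select("customer_id"), "customer_id")
          .groupBy("customer_id")
          .agg(F.sum("amount").alias("total_spent")))
result.show(10)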
OokTech / TW5 SingleExecutable Everything has moved to a new repo: https://github.com/OokTech/TW5-BobEXE/releases TiddlyWiki5 packaged with the multi-user plugin in a single executable file. This isn't meant as a place to contribute to; it is just to distribute the compiled files in the releases. See the readme below.
ambiversive / AsgThis is a content management system that merges concepts from multiplayer games, operating systems, wikis, link aggregators, feed readers, and more! Features: * OOP/MySQL/PHP/HTML/CSS/JS/Jquery/scrollTo+LocalScroll/Ace editor/Notifier.js * Documents organized in tree hierarchy, each document can act as directory * Every edit of a document saves a new revision with the old document * Chat integrated (chat notifies of cms events, uses long polling) * Aspect oriented (aspects can be toggled with commands or in sets- a user's aspect can be refreshed from the server by setting an aspect_preference variable to 2 (0 means off, 1 means on, 2 means needs refresh) * Command line (defaults to chat input, commands begin with forward slash) * Bots (Bot commands can be added as easily as adding a document in the appropriate place, bots can execute code. Bots respond to users they are focused on, when active.) * User-selectable stylesheets + individual css rules (styles are editable within the CMS, individual style rules are applied after loading style, added with /set body background black) * Easy to add new commands (/addcom) (commands are stored in CMS as javascript and loaded at runtime) * All aspects and commands are listed with /help * Aspects are remembered between sessions (ie: looks the way you left it) * All aspects, documents, commands, and users have access levels * Uses Ace editor for editing documents * DB info stored in database/config.php - run the .sql then login with admin/admin * Integrated link aggregator with full cache (fetches link with wget) * Integrated RSS feed parser and db of over 200 feeds * One aspect is called the 'metabrowser' which is an iframe that loads a URL on command (usage: /bload http://reddit.com or /meta) .. the url is also remembered between sessions. * There is an interesting JS function called 'simcom' which simulates a user typing into the command line- works for both commands and chat messages. Usage: asgConfig.simcom('chat message or command!')
BlockchainLabs / PebblecoinPebblecoin UPDATE 2015/12/31: Version 0.4.4.1 is now out. The major change is optimizing the daemon to use less RAM. It no longer keeps all the blocks, which are rarely needed, in RAM, and so RAM usage has decreased from around 2 gigabytes, to under 200 megabytes. Mac binaries are also now available. The new wallet is compatible with the old wallet - simply turn off the old wallet, and start the new wallet, and the blockchain will update automatically to use less RAM. Code: Release Notes 0.4.4.1 - (All) Fix blockchain RAM usage, from almost 2 GB to less than 200 MB - Seamless blockchain conversion on first run with new binaries - (Qt) Fix high CPU usage - (Qt) Fix sync indicator (# of total blocks) - (Mac) Mac binaries - Technical Notes: - (All) Blockchain disk-backed storage with sqlite3 and stxxl - (Mac) Fix mac compilation - (All) Update build files & instructions for linux, mac, windows - (All) Remove unused protobuf and OpenSSL dependencies for Qt wallet - (Tests) Fix valgrind errors - (Tests) Use local directory for blockchain instead of default directory - (Tests) Run tests on Windows if using new enough MSVC LINKS: Windows 64-bit: https://www.dropbox.com/s/b4kubwwnb4t7o4w/pebblecoin-all-win32-x64-v0.4.4.1.zip?dl=0 Mac 64-bit: https://www.dropbox.com/s/uoy9z1oxu4x53cv/pebblecoin-all-mac-x64-v0.4.4.1.tar.gz?dl=0 Linux 64-bit: https://www.dropbox.com/s/jq3h3bc29jmndks/pebblecoin-all-linux-x64-v0.4.4.1.tar.gz?dl=0 Exchange: https://poloniex.com/exchange#btc_xpb . Source: https://github.com/xpbcreator/pebblecoin/ CONTACT: xpbcreator@torguard.tg IRC: irc.freenode.net, #pebblecoin UPDATE 2015/06/08: Version 0.4.3.1 is now out. This is a minor, mostly bug-fix release. Work continues on the next major release which will bring us user-created currencies and user-graded contracts. Release notes: Code: Release Notes 0.4.3.1 - RPC calls for DPOS: - getdelegateinfos RPC call - get kimageseqs RPC call - block header contains signing_delegate_id - fix checkpoint rollback bug - fix inability to send coins if voting history was lost UPDATE 2015/05/04: Version 0.4.2.2 is now out. This is a bug-fix/cosmetic release. Release notes: Payment ID support Windows installer Logos updated Improved DPOS tab Sync issues fully fixed Fix rare crash bug Fix min out 0 bug Fix debit display Fix GUI not updating Updated hard-coded seed nodes UPDATE 2015/04/24: The switch-over to DPOS has succeeded without a hitch! DPOS blocks are being signed as we speak, at the far faster pace of 15 seconds per block. This marks the start of a new era for Pebblecoin. UPDATE 2015/04/21: Congratulations to the first registered delegate! This indicates the start of the forking change so everybody please update your daemons if you haven't already. To promote the coin and encourage people to become delegates, we've come up with an incentive scheme. First, we'll send a free 100 XPB to anybody who PMs me their public address, for people to play around with and to start using the coin. Second, once DPOS starts, for the first month of DPOS I'll send an extra 0.5 XPB to the signing delegate for every block they process. This is on top of the usual transaction fees they will receive. This is to encourage more people to become delegates at this important phase of the coin. UPDATE 2015/04/19: All went well on the testnet release, so after a few further minor modifications, we are releasing version 0.4.1.2 to the public. This is a forking change, so please update your clients and servers (links below). 
At block 83120, sometime on April 21st, registration for DPOS delegates will begin. At block 85300, sometime on April 24th, the network will switch over to DPOS. As with the testnet, to become a delegate and receive block fees for securing the network, just turn on your wallet, register to be a delegate (5 XPB fee), and then leave your wallet on. It will sign the blocks when it is your turn. While Roman works on the next phase of the release - introducing subcurrencies - I will be fixing up some loose ends on the wallet, adding payment ID support, etc. This is truly an exciting time for Pebblecoin. RELEASE NOTES: All clients adjust internal clocks using ntp (client list in src/common/ntp_time.cpp) Added testnet support DPOS registration starts Block 83120 (~April 21st) DPOS phase starts Block 85300 (~April 24th) Default fee bumped to 0.10 XPB Low-fee transactions no longer get relayed by default Significantly improved wallet sync Checkpoint at Block 79000 TOTAL CURRENT COINS: Available at this link. BLOCK TARGET TIME: 2 minutes EXPECTED EMISSION: At Block 3600 (End of Day 5): ~78 XPBs At Block 6480 (End of Day 9): ~758 XPBs At Block 9360 (End of Day 13): 6,771.0 XPBs At Block 12240 (End of Day 17): ~61,000 XPBs At Block 15120 (End of Day 21): ~550,000 XPBs, start of regular 300/block emission At Block 21900 (End of Month 1): ~2,600,000 XPBs, 300/block At Block 43800 (End of Month 2): ~9,150,000 XPBs, 300/block At Block 85300 (End of POW phase): ~21,500,300 XPBs. UPDATE: The Pebblecoin Pool is now live! Instructions: Download the linux miner and run it: ./minerd -o stratum+tcp://69.60.113.21:3350 -u YOUR_WALLET_ADDRESS -p x UPDATE: The Pebblecoin wallet is now live! There have been thousands of attempts at alternative currencies in the community. Many are 100% copies of existing blockchains with a different name. Some are very slight variations with no significant differences. From recent history it is apparent that the only realistic chance of viability for a new currency is one that offers innovation and continued support and development. The bitcoin community, for good reason, has shown interest in currencies that provide privacy of transactions; several currencies, such as Darkcoin, have become popular based on this desire. The best technology for privacy is CryptoNote, although for a variety of reasons there hasn't been much development for ease of use, and as a result there has not been significant adoption. Pebblecoin (XPB) is a CryptoNote-based coin with improvements and changes in some areas, and the promise of development in others. I invite developers to work on this technology with me. There is no premine; any tips or support of any developer, including myself, will be completely voluntary. The following are the areas which I have determined need changes/updates; I welcome suggestions, and am interested in what else I can try to improve. 1) New Mining algorithm (active) A mining algorithm is either susceptible to ASIC development or to being botnetted, meaning it is either more efficient to have a centralized mining entity (as is the case with bitcoin) or to have an algorithm that requires a real CPU, in which case botnets become very attractive. To my knowledge there does not exist a blockchain that attempts to solve both problems by having an algorithm that only works on a general purpose computer and is difficult to botnet. CryptoNote coins are currently mined primarily with botnets.
Boulderhash is a new mining algorithm requiring 13 GB RAM, nearly eliminating all possible zombie (botnet controlled) computers from mining. Most infected computers in the world do not have 13 GB available, so an algorithm that requires that much RAM severely limits the productivity of a botnet. 13 GB also makes ASICs cost prohibitive, and current GPUs do not have that much RAM. What's left is general purpose computers, as was the original intent of bitcoin's mining process. 2) Distribution of coins (active) It is very common at the launch of a new cryptocurrency for the distribution algorithm to be heavily weighted towards the very early adopters. Such a distribution is designed to give a massive advantage to people who are fully prepared to mine at launch, with rewards dropping off sharply shortly after, sometimes only a few days later. If the point of mining is to both secure the network and fairly distribute coins, a gradual build-up of rewards makes more sense, with no drop-off in mining rewards. At a standard block reward of 300, at launch each block will reward 0.3 coins, ramping up through 3 and 30 to the standard reward of 300, which will be the unchanging reward from that point on. It will take approximately 3 weeks for the block reward of 300 to be reached. 3) GUI Software (active) There are no current CryptoNote coins that have a downloadable GUI, which makes the user experience much worse than that of bitcoin. It is hard to achieve significant adoption with a command line interface. The very first update had the exact GUI written for bitcoin fully working with Pebblecoin. The GUI was released on Jan 19, before the full 300 XPB reward was awarded for winning the block. 4) IRC Chat support embedded in Client GUI (active) For user support and for talking to core developers, message boards such as Bitcointalk and reddit are primarily used. I have embedded an IRC client in the GUI and will be available at set hours for any kind of support. 5) Address aliasing (to be worked on) Just as a user visiting google does not need to know the ip address, an address should similarly have the ability to have an associated userid. If I ask a friend to send me pebblecoins it would be easier to tell him to send to @myuserid rather than a very long address or a scanned QR code. There should be a way of registering a userid on the blockchain that will permanently translate to a pebblecoin address. QT INSTRUCTIONS: Download the package for your respective platform Run the Qt executable. The software will generate a new wallet for you and use a default folder: ~/.pebblecoin on Linux and %appdata%\pebblecoin on Windows. To use an existing wallet, copy the wallet.keys file into the default folder. To use a different data directory and/or wallet file, run the software like so: ./pebblecoin-qt --data-dir <DataDir> --wallet-file <FileName>. To enable mining, run the start_mining_NEEDS_13GB_RAM.bat batch file. Or run the qt wallet with the --enable-boulderhash command line option, or put enable-boulderhash=1 into the config file. It will start mining to the wallet address. To change the number of mining threads (13GB required per thread), do --mining-threads <NumThreads> or edit the batch file. DAEMON + SIMPLEWALLET INSTRUCTIONS: Download the package, run: ./pebblecoind --data-dir pebblecoin_data Once the daemon finishes syncing, run the simplewallet: ./simplewallet POOL INSTRUCTIONS: Download the miner binary for your platform.
Run the miner using a wallet address obtained from simplewallet or the Qt Wallet: Code: minerd -o stratum+tcp://69.60.113.21:3350 -u YOUR_WALLET_ADDRESS -p x DEV WALLET (for donations): PByFqCfuDRUPVsNrzrUXnuUdF7LpXsTTZXeq5cdHpJDogbJ8EBXopciN7DmQiGhLEo5ArA7dFqGga2AAhbRaZ2gL8jjp9VmYgk
sanusanth / C Basic Simple Program What is C++? C++ is a general-purpose, object-oriented programming language. It was created by Bjarne Stroustrup at Bell Labs circa 1980. C++ is very similar to C (invented by Dennis Ritchie in the early 1970s). C++ is so compatible with C that it will probably compile over 99% of C programs without changing a line of source code. C++ is, however, a much better structured and safer language than C, as it is OOP-based. Some computer languages are written for a specific purpose. For example, Java was initially devised to control toasters and some other electronics, C was developed for programming operating systems, and Pascal was conceptualized to teach proper programming techniques. But C++ is a general-purpose language. It well deserves the widely acknowledged nickname "Swiss Pocket Knife of Languages." C++ is a cross-platform language that can be used to create high-performance applications. C++ was developed by Bjarne Stroustrup as an extension to the C language. C++ gives programmers a high level of control over system resources and memory. The language was updated three major times, in 2011, 2014, and 2017, to C++11, C++14, and C++17. About C++ Programming Multi-paradigm Language - C++ supports at least seven different styles of programming. Developers can choose any of the styles. General Purpose Language - You can use C++ to develop games, desktop apps, operating systems, and so on. Speed - Like C programming, the performance of optimized C++ code is exceptional. Object-oriented - C++ allows you to divide complex problems into smaller sets by using objects. Why Learn C++? C++ is used to develop games, desktop apps, operating systems, browsers, and so on because of its performance. After learning C++, it will be much easier to learn other programming languages like Java, Python, etc. C++ helps you to understand the internal architecture of a computer and how a computer stores and retrieves information. How to learn C++? C++ tutorial from Programiz - We provide step by step C++ tutorials, examples, and references. Get started with C++. Official C++ documentation - Might be hard to follow and understand for beginners. Visit the official C++ documentation. Write a lot of C++ programming code - The only way you can learn programming is by writing a lot of code. Read C++ code - Join GitHub's open-source projects and read other people's code. Is C++ the best programming language? The answer depends on perspective and requirements. Some tasks can be done in C++, though not very quickly, for example designing GUI screens for applications. Other languages like Visual Basic and Python have GUI design elements built into them and are therefore better suited for that type of GUI task. The scripting languages that provide extra programmability to applications such as MS Word and even Photoshop tend to be variants of Basic, not C++. C++ is still used widely, and the most famous software has its backbone in C++. This tutorial will help you learn C++ basics and advanced concepts. Who uses C++? Some of today's most visible systems have their critical parts written in C++. Examples are Amadeus (airline ticketing), Bloomberg (financial information), Amazon (web commerce), Google (web search), and Facebook (social media). Many programming languages depend on C++'s performance and reliability in their implementation.
Examples include Java Virtual Machines, JavaScript interpreters (e.g., Google's V8), browsers (e.g., Internet Explorer, Mozilla's Firefox, Apple's Safari, and Google's Chrome), and application and Web frameworks (e.g., Microsoft's .NET Web services framework). Applications that involve local and wide area networks, user interaction, numerics, graphics, and database access depend heavily on the C++ language. Why Use C++ C++ is one of the world's most popular programming languages. C++ can be found in today's operating systems, Graphical User Interfaces, and embedded systems. C++ is an object-oriented programming language which gives a clear structure to programs and allows code to be reused, lowering development costs. C++ is portable and can be used to develop applications that can be adapted to multiple platforms. C++ is fun and easy to learn! As C++ is close to C# and Java, it is easy for programmers to switch to C++ or vice versa. Definition - What does C++ Programming Language mean? C++ is an object oriented computer language created by notable computer scientist Bjarne Stroustrup as part of the evolution of the C family of languages. Some call C++ "C with classes" because it introduces object oriented programming principles, including the use of defined classes, to the C programming language framework. C++ is pronounced "see-plus-plus." C++ Variables Variables are the backbone of any programming language. A variable is merely a way to store some information for later use. We can retrieve this value or data by referring to a "word" that describes this information. Once declared and defined, variables may be used many times within the scope in which they were declared. C++ Control Structures When a program runs, the code is read by the compiler line by line (from top to bottom, and for the most part left to right). This is known as "code flow." When the code is being read from top to bottom, it may encounter a point where it needs to make a decision. Based on the decision, the program may jump to a different part of the code. It may even make the compiler re-run a specific piece again, or just skip a bunch of code. You could think of this process as choosing from different courses on Guru99: you decide, click a link, and skip a few pages. In the same way, a computer program has a set of strict rules to decide the flow of program execution. C++ Syntax Syntax is a layout of words, expressions, and symbols. Think of an email address: a form rejects a malformed one precisely because an email address has a well-defined syntax. You need some combination of letters and numbers, potentially with underscores (_) or periods (.) in between, followed by an at (@) symbol, followed by some website domain (company.com). Syntax in a programming language is much the same: a well-defined set of rules that allows you to create a piece of well-functioning software. But if you don't abide by the rules of the programming language's syntax, you'll get errors. C++ Tools In the real world, a tool is something (usually a physical object) that helps you to get a certain job done promptly. This holds true in the programming world too. A tool in programming is a piece of software which, when used with your code, allows you to program faster. There are probably tens of thousands, if not millions, of different tools across all the programming languages. The most crucial tool, considered by many, is an IDE, an Integrated Development Environment. An IDE is software that will make your coding life so much easier.
IDEs ensure that your files and folders are organized and give you a nice and clean way to view them. Types of C++ Errors Another way to look at C++ in a practical sense is to start enumerating different kinds of errors that occur as the written code makes its way to final execution. First, there are syntax errors where the code is actually written in an illegible way. This can be a misuse of punctuation, or the misspelling of a function command or anything else that compromises the integrity of the syntax as it is written. Another fundamental type of error is a compiler error that simply tells the programmer the compiler was not able to do its work effectively. As a compiler language, C++ relies on the compiler to make the source code into machine readable code and optimize it in various ways. A third type of error happens after the program has been successfully compiled. Runtime errors are not uncommon in C++ executables. What they represent is some lack of designated resource or non-working command in the executable program. In other words, the syntax is right, and the program was compiled successfully, but as the program is doing its work, it encounters a problem, whether that has to do with interdependencies, operating system requirements or anything else in the general environment in which the program is trying to work. Over time, C++ has remained a very useful language not only in computer programming itself, but in teaching new programmers about how object oriented programming works.
uvhw / Bitcoin FoundationBitcoin: A Peer-to-Peer Electronic Cash System Satoshi Nakamoto satoshin@gmx.com www.bitcoin.org Abstract. A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone. 1. Introduction Commerce on the Internet has come to rely almost exclusively on financial institutions serving as trusted third parties to process electronic payments. While the system works well enough for most transactions, it still suffers from the inherent weaknesses of the trust based model. Completely non-reversible transactions are not really possible, since financial institutions cannot avoid mediating disputes. The cost of mediation increases transaction costs, limiting the minimum practical transaction size and cutting off the possibility for small casual transactions, and there is a broader cost in the loss of ability to make non-reversible payments for non- reversible services. With the possibility of reversal, the need for trust spreads. Merchants must be wary of their customers, hassling them for more information than they would otherwise need. A certain percentage of fraud is accepted as unavoidable. These costs and payment uncertainties can be avoided in person by using physical currency, but no mechanism exists to make payments over a communications channel without a trusted party. What is needed is an electronic payment system based on cryptographic proof instead of trust, allowing any two willing parties to transact directly with each other without the need for a trusted third party. Transactions that are computationally impractical to reverse would protect sellers from fraud, and routine escrow mechanisms could easily be implemented to protect buyers. In this paper, we propose a solution to the double-spending problem using a peer-to-peer distributed timestamp server to generate computational proof of the chronological order of transactions. The system is secure as long as honest nodes collectively control more CPU power than any cooperating group of attacker nodes. 1 2. Transactions We define an electronic coin as a chain of digital signatures. Each owner transfers the coin to the next by digitally signing a hash of the previous transaction and the public key of the next owner and adding these to the end of the coin. A payee can verify the signatures to verify the chain of ownership. 
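As a rough illustration of the data structure described above (not the actual Bitcoin transaction format: the field types here are placeholder strings and integers rather than ECDSA keys and SHA-256 hashes), a coin can be modelled as an ordered list of signed transfers:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// One link in the chain of ownership: the previous owner signs a hash of the
// previous transaction together with the next owner's public key.
struct Transfer {
    std::uint64_t prev_tx_hash;          // hash of the previous transaction
    std::string   next_owner_pubkey;     // public key of the new owner
    std::string   prev_owner_signature;  // signature over the two fields above
};

// A "coin" is just the ordered history of transfers; a payee walks the list
// and checks each signature against the previous owner's public key.
using Coin = std::vector<Transfer>;

int main() {
    Coin coin = {
        {0x1111, "owner1_pubkey", "owner0_signature"},
        {0x2222, "owner2_pubkey", "owner1_signature"},
    };
    std::cout << "coin has " << coin.size() << " transfers\n";
    return 0;
}
```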
[Figure: each transaction contains a hash of the previous transaction and the new owner's public key, signed by the current owner (Owner 0 → Owner 1 → Owner 2 → Owner 3).]
The problem of course is the payee can't verify that one of the owners did not double-spend the coin. A common solution is to introduce a trusted central authority, or mint, that checks every transaction for double spending. After each transaction, the coin must be returned to the mint to issue a new coin, and only coins issued directly from the mint are trusted not to be double-spent. The problem with this solution is that the fate of the entire money system depends on the company running the mint, with every transaction having to go through them, just like a bank. We need a way for the payee to know that the previous owners did not sign any earlier transactions. For our purposes, the earliest transaction is the one that counts, so we don't care about later attempts to double-spend. The only way to confirm the absence of a transaction is to be aware of all transactions. In the mint based model, the mint was aware of all transactions and decided which arrived first. To accomplish this without a trusted party, transactions must be publicly announced [1], and we need a system for participants to agree on a single history of the order in which they were received. The payee needs proof that at the time of each transaction, the majority of nodes agreed it was the first received. 3. Timestamp Server The solution we propose begins with a timestamp server. A timestamp server works by taking a hash of a block of items to be timestamped and widely publishing the hash, such as in a newspaper or Usenet post [2-5]. The timestamp proves that the data must have existed at the time, obviously, in order to get into the hash. Each timestamp includes the previous timestamp in its hash, forming a chain, with each additional timestamp reinforcing the ones before it.
[Figure: each timestamped block of items includes the hash of the previous block, forming a chain.]
4. Proof-of-Work To implement a distributed timestamp server on a peer-to-peer basis, we will need to use a proof-of-work system similar to Adam Back's Hashcash [6], rather than newspaper or Usenet posts. The proof-of-work involves scanning for a value that when hashed, such as with SHA-256, the hash begins with a number of zero bits. The average work required is exponential in the number of zero bits required and can be verified by executing a single hash. For our timestamp network, we implement the proof-of-work by incrementing a nonce in the block until a value is found that gives the block's hash the required zero bits. Once the CPU effort has been expended to make it satisfy the proof-of-work, the block cannot be changed without redoing the work. As later blocks are chained after it, the work to change the block would include redoing all the blocks after it. The proof-of-work also solves the problem of determining representation in majority decision making. If the majority were based on one-IP-address-one-vote, it could be subverted by anyone able to allocate many IPs. Proof-of-work is essentially one-CPU-one-vote. The majority decision is represented by the longest chain, which has the greatest proof-of-work effort invested in it. If a majority of CPU power is controlled by honest nodes, the honest chain will grow the fastest and outpace any competing chains.
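The nonce scan described here can be sketched in a few lines of C++. Note that std::hash below is only a stand-in for SHA-256 (it is not cryptographically secure), and the block contents and difficulty are placeholder values, not anything from the reference implementation:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Count leading zero bits in a 64-bit hash value.
int leading_zero_bits(std::uint64_t h) {
    int n = 0;
    for (std::uint64_t mask = 1ULL << 63; mask != 0 && (h & mask) == 0; mask >>= 1)
        ++n;
    return n;
}

int main() {
    const std::string block_contents = "prev_hash|merkle_root|timestamp";  // placeholder
    const int difficulty = 16;          // required number of leading zero bits
    std::hash<std::string> hasher;      // stand-in for SHA-256; NOT secure

    // Increment the nonce until the block's hash has the required zero bits.
    std::uint64_t nonce = 0;
    while (leading_zero_bits(hasher(block_contents + std::to_string(nonce))) < difficulty)
        ++nonce;

    std::cout << "found nonce " << nonce << "\n";
    return 0;
}
```

Each additional required zero bit roughly doubles the expected number of nonces tried, which is why the paper can speak of tuning difficulty to hit an average number of blocks per hour.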
To modify a past block, an attacker would have to redo the proof-of-work of the block and all blocks after it and then catch up with and surpass the work of the honest nodes. We will show later that the probability of a slower attacker catching up diminishes exponentially as subsequent blocks are added. To compensate for increasing hardware speed and varying interest in running nodes over time, the proof-of-work difficulty is determined by a moving average targeting an average number of blocks per hour. If they're generated too fast, the difficulty increases. 5. Network The steps to run the network are as follows: 1) New transactions are broadcast to all nodes. 2) Each node collects new transactions into a block. 3) Each node works on finding a difficult proof-of-work for its block. 4) When a node finds a proof-of-work, it broadcasts the block to all nodes. 5) Nodes accept the block only if all transactions in it are valid and not already spent. 6) Nodes express their acceptance of the block by working on creating the next block in the chain, using the hash of the accepted block as the previous hash. Nodes always consider the longest chain to be the correct one and will keep working on extending it. If two nodes broadcast different versions of the next block simultaneously, some nodes may receive one or the other first. In that case, they work on the first one they received, but save the other branch in case it becomes longer. The tie will be broken when the next proof-of-work is found and one branch becomes longer; the nodes that were working on the other branch will then switch to the longer one.
[Figure: blocks chained by previous-block hashes, each block containing a nonce and transactions.]
New transaction broadcasts do not necessarily need to reach all nodes. As long as they reach many nodes, they will get into a block before long. Block broadcasts are also tolerant of dropped messages. If a node does not receive a block, it will request it when it receives the next block and realizes it missed one. 6. Incentive By convention, the first transaction in a block is a special transaction that starts a new coin owned by the creator of the block. This adds an incentive for nodes to support the network, and provides a way to initially distribute coins into circulation, since there is no central authority to issue them. The steady addition of a constant amount of new coins is analogous to gold miners expending resources to add gold to circulation. In our case, it is CPU time and electricity that is expended. The incentive can also be funded with transaction fees. If the output value of a transaction is less than its input value, the difference is a transaction fee that is added to the incentive value of the block containing the transaction. Once a predetermined number of coins have entered circulation, the incentive can transition entirely to transaction fees and be completely inflation free. The incentive may help encourage nodes to stay honest. If a greedy attacker is able to assemble more CPU power than all the honest nodes, he would have to choose between using it to defraud people by stealing back his payments, or using it to generate new coins. He ought to find it more profitable to play by the rules, such rules that favour him with more new coins than everyone else combined, than to undermine the system and the validity of his own wealth. 7. Reclaiming Disk Space Once the latest transaction in a coin is buried under enough blocks, the spent transactions before it can be discarded to save disk space.
To facilitate this without breaking the block's hash, transactions are hashed in a Merkle Tree [7][2][5], with only the root included in the block's hash. Old blocks can then be compacted by stubbing off branches of the tree. The interior hashes do not need to be stored.
[Figure: Transactions Hashed in a Merkle Tree, and the same block After Pruning Tx0-2 from the Block; only the root hash is part of the block header.]
A block header with no transactions would be about 80 bytes. If we suppose blocks are generated every 10 minutes, 80 bytes * 6 * 24 * 365 = 4.2MB per year. With computer systems typically selling with 2GB of RAM as of 2008, and Moore's Law predicting current growth of 1.2GB per year, storage should not be a problem even if the block headers must be kept in memory. 8. Simplified Payment Verification It is possible to verify payments without running a full network node. A user only needs to keep a copy of the block headers of the longest proof-of-work chain, which he can get by querying network nodes until he's convinced he has the longest chain, and obtain the Merkle branch linking the transaction to the block it's timestamped in. He can't check the transaction for himself, but by linking it to a place in the chain, he can see that a network node has accepted it, and blocks added after it further confirm the network has accepted it.
[Figure: the longest proof-of-work chain of block headers, with the Merkle branch for Tx3 linking the transaction to the block it is timestamped in.]
As such, the verification is reliable as long as honest nodes control the network, but is more vulnerable if the network is overpowered by an attacker. While network nodes can verify transactions for themselves, the simplified method can be fooled by an attacker's fabricated transactions for as long as the attacker can continue to overpower the network. One strategy to protect against this would be to accept alerts from network nodes when they detect an invalid block, prompting the user's software to download the full block and alerted transactions to confirm the inconsistency. Businesses that receive frequent payments will probably still want to run their own nodes for more independent security and quicker verification. 9. Combining and Splitting Value Although it would be possible to handle coins individually, it would be unwieldy to make a separate transaction for every cent in a transfer. To allow value to be split and combined, transactions contain multiple inputs and outputs. Normally there will be either a single input from a larger previous transaction or multiple inputs combining smaller amounts, and at most two outputs: one for the payment, and one returning the change, if any, back to the sender. It should be noted that fan-out, where a transaction depends on several transactions, and those transactions depend on many more, is not a problem here. There is never the need to extract a complete standalone copy of a transaction's history.
[Figure: a transaction with multiple inputs and outputs.]
10. Privacy The traditional banking model achieves a level of privacy by limiting access to information to the parties involved and the trusted third party. The necessity to announce all transactions publicly precludes this method, but privacy can still be maintained by breaking the flow of information in another place: by keeping public keys anonymous.
The public can see that someone is sending an amount to someone else, but without information linking the transaction to anyone. This is similar to the level of information released by stock exchanges, where the time and size of individual trades, the "tape", is made public, but without telling who the parties were.
[Figure: the traditional privacy model (identities → transactions → trusted third party → counterparty, with the public outside) versus the new privacy model (identities kept private; transactions visible to the public).]
As an additional firewall, a new key pair should be used for each transaction to keep them from being linked to a common owner. Some linking is still unavoidable with multi-input transactions, which necessarily reveal that their inputs were owned by the same owner. The risk is that if the owner of a key is revealed, linking could reveal other transactions that belonged to the same owner. 11. Calculations We consider the scenario of an attacker trying to generate an alternate chain faster than the honest chain. Even if this is accomplished, it does not throw the system open to arbitrary changes, such as creating value out of thin air or taking money that never belonged to the attacker. Nodes are not going to accept an invalid transaction as payment, and honest nodes will never accept a block containing them. An attacker can only try to change one of his own transactions to take back money he recently spent. The race between the honest chain and an attacker chain can be characterized as a Binomial Random Walk. The success event is the honest chain being extended by one block, increasing its lead by +1, and the failure event is the attacker's chain being extended by one block, reducing the gap by -1. The probability of an attacker catching up from a given deficit is analogous to a Gambler's Ruin problem. Suppose a gambler with unlimited credit starts at a deficit and plays potentially an infinite number of trials to try to reach breakeven. We can calculate the probability he ever reaches breakeven, or that an attacker ever catches up with the honest chain, as follows [8]:
p = probability an honest node finds the next block
q = probability the attacker finds the next block
q_z = probability the attacker will ever catch up from z blocks behind
$$q_z = \begin{cases} 1 & \text{if } p \le q \\ (q/p)^z & \text{if } p > q \end{cases}$$
Given our assumption that p > q, the probability drops exponentially as the number of blocks the attacker has to catch up with increases. With the odds against him, if he doesn't make a lucky lunge forward early on, his chances become vanishingly small as he falls further behind. We now consider how long the recipient of a new transaction needs to wait before being sufficiently certain the sender can't change the transaction. We assume the sender is an attacker who wants to make the recipient believe he paid him for a while, then switch it to pay back to himself after some time has passed. The receiver will be alerted when that happens, but the sender hopes it will be too late. The receiver generates a new key pair and gives the public key to the sender shortly before signing. This prevents the sender from preparing a chain of blocks ahead of time by working on it continuously until he is lucky enough to get far enough ahead, then executing the transaction at that moment. Once the transaction is sent, the dishonest sender starts working in secret on a parallel chain containing an alternate version of his transaction. The recipient waits until the transaction has been added to a block and z blocks have been linked after it.
He doesn't know the exact amount of progress the attacker has made, but assuming the honest blocks took the average expected time per block, the attacker's potential progress will be a Poisson distribution with expected value:
$$\lambda = z \frac{q}{p}$$
To get the probability the attacker could still catch up now, we multiply the Poisson density for each amount of progress he could have made by the probability he could catch up from that point:
$$\sum_{k=0}^{\infty} \frac{\lambda^k e^{-\lambda}}{k!} \cdot \begin{cases} (q/p)^{z-k} & \text{if } k \le z \\ 1 & \text{if } k > z \end{cases}$$
Rearranging to avoid summing the infinite tail of the distribution...
$$1 - \sum_{k=0}^{z} \frac{\lambda^k e^{-\lambda}}{k!} \left(1 - (q/p)^{z-k}\right)$$
Converting to C code...
#include <math.h>
double AttackerSuccessProbability(double q, int z)
{
    double p = 1.0 - q;
    double lambda = z * (q / p);
    double sum = 1.0;
    int i, k;
    for (k = 0; k <= z; k++)
    {
        double poisson = exp(-lambda);
        for (i = 1; i <= k; i++)
            poisson *= lambda / i;
        sum -= poisson * (1 - pow(q / p, z - k));
    }
    return sum;
}
Running some results, we can see the probability drop off exponentially with z.
q=0.1
z=0 P=1.0000000
z=1 P=0.2045873
z=2 P=0.0509779
z=3 P=0.0131722
z=4 P=0.0034552
z=5 P=0.0009137
z=6 P=0.0002428
z=7 P=0.0000647
z=8 P=0.0000173
z=9 P=0.0000046
z=10 P=0.0000012
q=0.3
z=0 P=1.0000000
z=5 P=0.1773523
z=10 P=0.0416605
z=15 P=0.0101008
z=20 P=0.0024804
z=25 P=0.0006132
z=30 P=0.0001522
z=35 P=0.0000379
z=40 P=0.0000095
z=45 P=0.0000024
z=50 P=0.0000006
Solving for P less than 0.1%...
P < 0.001
q=0.10 z=5
q=0.15 z=8
q=0.20 z=11
q=0.25 z=15
q=0.30 z=24
q=0.35 z=41
q=0.40 z=89
q=0.45 z=340
12. Conclusion We have proposed a system for electronic transactions without relying on trust. We started with the usual framework of coins made from digital signatures, which provides strong control of ownership, but is incomplete without a way to prevent double-spending. To solve this, we proposed a peer-to-peer network using proof-of-work to record a public history of transactions that quickly becomes computationally impractical for an attacker to change if honest nodes control a majority of CPU power. The network is robust in its unstructured simplicity. Nodes work all at once with little coordination. They do not need to be identified, since messages are not routed to any particular place and only need to be delivered on a best effort basis. Nodes can leave and rejoin the network at will, accepting the proof-of-work chain as proof of what happened while they were gone. They vote with their CPU power, expressing their acceptance of valid blocks by working on extending them and rejecting invalid blocks by refusing to work on them. Any needed rules and incentives can be enforced with this consensus mechanism. References
[1] W. Dai, "b-money," http://www.weidai.com/bmoney.txt, 1998.
[2] H. Massias, X.S. Avila, and J.-J. Quisquater, "Design of a secure timestamping service with minimal trust requirements," In 20th Symposium on Information Theory in the Benelux, May 1999.
[3] S. Haber, W.S. Stornetta, "How to time-stamp a digital document," In Journal of Cryptology, vol 3, no 2, pages 99-111, 1991.
[4] D. Bayer, S. Haber, W.S. Stornetta, "Improving the efficiency and reliability of digital time-stamping," In Sequences II: Methods in Communication, Security and Computer Science, pages 329-334, 1993.
[5] S. Haber, W.S. Stornetta, "Secure names for bit-strings," In Proceedings of the 4th ACM Conference on Computer and Communications Security, pages 28-35, April 1997.
[6] A. Back, "Hashcash - a denial of service counter-measure," http://www.hashcash.org/papers/hashcash.pdf, 2002.
[7] R.C. Merkle, "Protocols for public key cryptosystems," In Proc. 1980 Symposium on Security and Privacy, IEEE Computer Society, pages 122-133, April 1980.
[8] W. Feller, "An introduction to probability theory and its applications," 1957.
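As a supplement to the Merkle tree construction described in Section 7 above, here is a minimal C++ sketch of computing a Merkle root from leaf hashes. As in the earlier sketch, std::hash stands in for the double-SHA-256 Bitcoin actually uses and the leaf transactions are placeholders; this is an illustration, not the reference implementation:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hash two child hashes into a parent hash (stand-in for double-SHA-256).
std::uint64_t hash_pair(std::uint64_t left, std::uint64_t right) {
    return std::hash<std::string>{}(std::to_string(left) + ":" + std::to_string(right));
}

// Compute a Merkle root, duplicating the last node when a level has an odd
// number of entries (as Bitcoin does).
std::uint64_t merkle_root(std::vector<std::uint64_t> level) {
    if (level.empty()) return 0;
    while (level.size() > 1) {
        if (level.size() % 2 != 0) level.push_back(level.back());
        std::vector<std::uint64_t> parent;
        for (std::size_t i = 0; i < level.size(); i += 2)
            parent.push_back(hash_pair(level[i], level[i + 1]));
        level = std::move(parent);
    }
    return level[0];
}

int main() {
    std::hash<std::string> h;
    // Leaf hashes of four placeholder transactions Tx0..Tx3.
    std::vector<std::uint64_t> leaves = {h("Tx0"), h("Tx1"), h("Tx2"), h("Tx3")};
    std::cout << "merkle root: " << merkle_root(leaves) << "\n";
    return 0;
}
```

Only the root ends up in the block header, so pruning amounts to discarding leaf and interior hashes that are no longer needed to recompute the root from a Merkle branch.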
Mario-Kart-Felix / Solar Wind Hacker Book2020 was a roller coaster of major, world-shaking events. We all couldn't wait for the year to end. But just as 2020 was about to close, it pulled another fast one on us: the SolarWinds hack, one of the biggest cybersecurity breaches of the 21st century. The SolarWinds hack was a major event not because a single company was breached, but because it triggered a much larger supply chain incident that affected thousands of organizations, including the U.S. government. What is SolarWinds? SolarWinds is a major software company based in Tulsa, Okla., which provides system management tools for network and infrastructure monitoring, and other technical services to hundreds of thousands of organizations around the world. Among the company's products is an IT performance monitoring system called Orion. As an IT monitoring system, SolarWinds Orion has privileged access to IT systems to obtain log and system performance data. It is that privileged position and its wide deployment that made SolarWinds a lucrative and attractive target. What is the SolarWinds hack? The SolarWinds hack is the commonly used term to refer to the supply chain breach that involved the SolarWinds Orion system. In this hack, suspected nation-state hackers that have been identified as a group known as Nobelium by Microsoft -- and often simply referred to as the SolarWinds Hackers by other researchers -- gained access to the networks, systems and data of thousands of SolarWinds customers. The breadth of the hack is unprecedented and one of the largest, if not the largest, of its kind ever recorded. More than 30,000 public and private organizations -- including local, state and federal agencies -- use the Orion network management system to manage their IT resources. As a result, the hack compromised the data, networks and systems of thousands when SolarWinds inadvertently delivered the backdoor malware as an update to the Orion software. SolarWinds customers weren't the only ones affected. Because the hack exposed the inner workings of Orion users, the hackers could potentially gain access to the data and networks of their customers and partners as well -- enabling affected victims to grow exponentially from there. Orion Platform hack compromised networks of thousands of SolarWinds customers Hackers compromised a digitally signed SolarWinds Orion network monitoring component, opening a backdoor into the networks of thousands of SolarWinds government and enterprise customers. How did the SolarWinds hack happen? The hackers used a method known as a supply chain attack to insert malicious code into the Orion system. A supply chain attack works by targeting a third party with access to an organization's systems rather than trying to hack the networks directly. The third-party software, in this case the SolarWinds Orion Platform, creates a backdoor through which hackers can access and impersonate users and accounts of victim organizations. The malware could also access system files and blend in with legitimate SolarWinds activity without detection, even by antivirus software. SolarWinds was a perfect target for this kind of supply chain attack. Because their Orion software is used by many multinational companies and government agencies, all the hackers had to do was install the malicious code into a new batch of software distributed by SolarWinds as an update or patch. The SolarWinds hack timeline Here is a timeline of the SolarWinds hack: September 2019. 
Threat actors gain unauthorized access to the SolarWinds network. October 2019. Threat actors test an initial code injection into Orion. Feb. 20, 2020. Malicious code known as Sunburst is injected into Orion. March 26, 2020. SolarWinds unknowingly starts sending out Orion software updates with the hacked code. According to a U.S. Department of Homeland Security advisory, the affected versions of SolarWinds Orion are 2019.4 through 2020.2.1 HF1. More than 18,000 SolarWinds customers installed the malicious updates, with the malware spreading undetected. Through this code, hackers accessed SolarWinds's customer information technology systems, which they could then use to install even more malware to spy on other companies and organizations. Who was affected? According to reports, the malware affected many companies and organizations. Even government departments such as Homeland Security, State, Commerce and Treasury were affected, as there was evidence that emails were missing from their systems. Private companies such as FireEye, Microsoft, Intel, Cisco and Deloitte also suffered from this attack. The breach was first detected by cybersecurity company FireEye. The company confirmed it had been infected with the malware when it saw the infection in customer systems. FireEye labeled the SolarWinds hack "UNC2452" and identified the backdoor used to gain access to its systems through SolarWinds as "Sunburst." Microsoft also confirmed that it found signs of the malware in its systems, as the breach was affecting its customers as well. Reports indicated Microsoft's own systems were being used to further the hacking attack, but Microsoft denied this claim to news agencies. Later, the company worked with FireEye and GoDaddy to block and isolate versions of Orion known to contain the malware to cut off hackers from customers' systems. They did so by turning the domain used by the Sunburst backdoor in Orion into a kill switch, a mechanism to prevent Sunburst from operating further. Nonetheless, even with the kill switch in place, the hack is still ongoing. Investigators have a lot of data to look through, as many companies using the Orion software aren't yet sure they are free from the backdoor malware. It will take a long time before the full impact of the hack is known. Why did it take so long to detect the SolarWinds attack? With attackers having first gained access to the SolarWinds systems in September 2019 and the attack not being publicly discovered or reported until December 2020, attackers may well have had 14 or more months of unfettered access. The time between when an attacker gains access and when the attack is actually discovered is often referred to as dwell time. According to a report released in January 2020 by security firm CrowdStrike, the average dwell time in 2019 was 95 days. Given that it took well over a year from the time the attackers first entered the SolarWinds network until the breach was discovered, the dwell time in this attack far exceeded the average. The question of why it took so long to detect the SolarWinds attack has a lot to do with the sophistication of the Sunburst code and of the hackers that executed the attack.
"Analysis suggests that by managing the intrusion through multiple servers based in the United States and mimicking legitimate network traffic, the attackers were able to circumvent threat detection techniques employed by both SolarWinds, other private companies, and the federal government," SolarWinds said in its analysis of the attack. FireEye, which was the first firm to publicly report the attack, conducted its own analysis of the SolarWinds attack. In its report, FireEye described in detail the complex series of action that the attackers took to mask their tracks. Even before Sunburst attempts to connect out to its command-and-control server, the malware executes a number of checks to make sure no antimalware or forensic analysis tools are running. What was the purpose of the hack? The purpose of the hack remains largely unknown. Still, there are many reasons hackers would want to get into an organization's system, including having access to future product plans or employee and customer information held for ransom. It is also not yet clear what information, if any, hackers stole from government agencies. But the level of access appears to be deep and broad. There are speculations that many enterprises might be collateral damage, as the main focus of the attack was government agencies that make use of the SolarWinds IT management systems. Who was responsible for the hack? Federal investigators and cybersecurity agents believe a Russian espionage operation -- mostly likely Russia's Foreign Intelligence Service -- is behind the SolarWinds attack. The Russian government has denied any involvement in the attack, releasing a statement that said, "Malicious activities in the information space contradicts the principles of the Russian foreign policy, national interests and understanding of interstate relations." They also added that "Russia does not conduct offensive operations in the cyber domain." Contrary to experts in his administration, then-President Donald Trump hinted at around the time of the discovery of the SolarWinds hack that Chinese hackers might be behind the cybersecurity attack. However, he did not present any evidence to back up his claim. Shortly after his inauguration, President Joe Biden vowed that his administration intended to hold Russia accountable, through the launch of a full-scale intelligence assessment and review of the SolarWinds attack and those behind it. The president also created the position of deputy national security adviser for cybersecurity as part of the National Security Council. The role, held by veteran intelligence operative Anne Neuberger, is part of an overall bid by the Biden administration to refresh the federal government's approach to cybersecurity and better respond to nation-state actors. Naming the attack: What is Solorigate, Sunburst and Nobelium? The SolarWinds attack has a number of different names associated with it. While the attack is often referred to simply as the SolarWinds attack, that isn't the only name to know. Sunburst. This is the name of the actual malicious code injection that was planted by hackers into the SolarWinds Orion IT monitoring system code. Both SolarWinds and CrowdStrike generally refer to the attack as Sunburst. Solorigate. Microsoft initially dubbed the actual threat actor group behind the SolarWinds attack as Solorigate. It's a name that stuck and was adopted by other researchers as well as media. Nobelium. 
In March 2021, Microsoft decided that the primary designation for the threat actor behind the SolarWinds attack should actually be Nobelium -- the idea being that the group is active against multiple victims -- not just SolarWinds -- and uses more malware than just Sunburst. The China connection to the SolarWinds attack While it is suspected that the initial Sunburst code and the attack against SolarWinds and its users came from a threat actor based in Russia, other nation-state threat actors have also used SolarWinds in attacks. According to a Reuters report, suspected nation-state hackers based in China exploited SolarWinds during the same period of time the Sunburst attack occurred. The suspected China-based threat actors targeted the National Finance Center, which is a payroll agency within the U.S. Department of Agriculture. It is suspected that the China-based attackers did not use Sunburst, but rather a different malware that SolarWinds identifies as Supernova. Why is the SolarWinds hack important? The SolarWinds supply chain attack is a global hack, as threat actors turned the Orion software into a weapon gaining access to several government systems and thousands of private systems around the world. Due to the nature of the software -- and by extension the Sunburst malware -- having access to entire networks, many government and enterprise networks and systems face the risk of significant breaches. The hack could also be the catalyst for rapid, broad change in the cybersecurity industry. Many companies and government agencies are now in the process of devising new methods to react to these types of attacks before they happen. Governments and organizations are learning that it is not enough to build a firewall and hope it protects them. They have to actively seek out vulnerabilities in their systems, and either shore them up or turn them into traps against these types of attacks. Since the hack was discovered, SolarWinds has recommended customers update their existing Orion platform. The company has released patches for the malware and other potential vulnerabilities discovered since the initial Orion attack. SolarWinds also recommended customers not able to update Orion isolate SolarWinds servers and/or change passwords for accounts that have access to those servers. The greater White House cybersecurity focus will be crucial, some industry experts have said. But organizations should consider adopting modern software-as-a-service tools for monitoring and collaboration. While the cybersecurity industry has significantly advanced in the last decade, these kinds of attacks show that there is still a long way to go to get really secure systems. The Nobelium group continues to attack targets The suspected threat actor group behind the SolarWinds attack has remained active in 2021 and hasn't stopped at just targeting SolarWinds. On May 27, 2021, Microsoft reported that Nobelium, the group allegedly behind the SolarWinds attack, infiltrated software from email marketing service Constant Contact. According to Microsoft, Nobelium targeted approximately 3,000 email accounts at more than 150 different organizations. The initial attack vector appears to be an account used by USAID. From that initial foothold, Nobelium was able to send out phishing emails in an attempt to get victims to click on a link that would deploy a backdoor Trojan designed to steal user information.
ultranet1 / APACHE AIRFLOW DATA PIPELINESProject Description: A music streaming company wants to introduce more automation and monitoring to their data warehouse ETL pipelines and they have come to the conclusion that the best tool to achieve this is Apache Airflow. As their Data Engineer, I was tasked to create a reusable production-grade data pipeline that incorporates data quality checks and allows for easy backfills. Several analysts and Data Scientists rely on the output generated by this pipeline and it is expected that the pipeline runs daily on a schedule by pulling new data from the source and store the results to the destination. Data Description: The source data resides in S3 and needs to be processed in a data warehouse in Amazon Redshift. The source datasets consist of JSON logs that tell about user activity in the application and JSON metadata about the songs the users listen to. Data Pipeline design: At a high-level the pipeline does the following tasks. Extract data from multiple S3 locations. Load the data into Redshift cluster. Transform the data into a star schema. Perform data validation and data quality checks. Calculate the most played songs for the specified time interval. Load the result back into S3. dag Structure of the Airflow DAG Design Goals: Based on the requirements of our data consumers, our pipeline is required to adhere to the following guidelines: The DAG should not have any dependencies on past runs. On failure, the task is retried for 3 times. Retries happen every 5 minutes. Catchup is turned off. Do not email on retry. Pipeline Implementation: Apache Airflow is a Python framework for programmatically creating workflows in DAGs, e.g. ETL processes, generating reports, and retraining models on a daily basis. The Airflow UI automatically parses our DAG and creates a natural representation for the movement and transformation of data. A DAG simply is a collection of all the tasks you want to run, organized in a way that reflects their relationships and dependencies. A DAG describes how you want to carry out your workflow, and Operators determine what actually gets done. By default, airflow comes with some simple built-in operators like PythonOperator, BashOperator, DummyOperator etc., however, airflow lets you extend the features of a BaseOperator and create custom operators. For this project, I developed several custom operators. operators The description of each of these operators follows: StageToRedshiftOperator: Stages data to a specific redshift cluster from a specified S3 location. Operator uses templated fields to handle partitioned S3 locations. LoadFactOperator: Loads data to the given fact table by running the provided sql statement. Supports delete-insert and append style loads. LoadDimensionOperator: Loads data to the given dimension table by running the provided sql statement. Supports delete-insert and append style loads. SubDagOperator: Two or more operators can be grouped into one task using the SubDagOperator. Here, I am grouping the tasks of checking if the given table has rows and then run a series of data quality sql commands. HasRowsOperator: Data quality check to ensure that the specified table has rows. DataQualityOperator: Performs data quality checks by running sql statements to validate the data. SongPopularityOperator: Calculates the top ten most popular songs for a given interval. The interval is dictated by the DAG schedule. UnloadToS3Operator: Stores the analysis result back to the given S3 location. 
Code for each of these operators is located in the plugins/operators directory. Pipeline Schedule and Data Partitioning: The events data residing on S3 is partitioned by year (2018) and month (11). Our task is to incrementally load the event json files, and run them through the entire pipeline to calculate song popularity and store the result back into S3. In this manner, we can obtain the top songs per day in an automated fashion using the pipeline. Please note, this is a trivial analysis, but you can imagine other complex queries that follow a similar structure. S3 Input events data: s3://<bucket>/log_data/2018/11/ 2018-11-01-events.json 2018-11-02-events.json 2018-11-03-events.json .. 2018-11-28-events.json 2018-11-29-events.json 2018-11-30-events.json S3 Output song popularity data: s3://skuchkula-topsongs/ songpopularity_2018-11-01 songpopularity_2018-11-02 songpopularity_2018-11-03 ... songpopularity_2018-11-28 songpopularity_2018-11-29 songpopularity_2018-11-30 The DAG can be configured by giving it some default_args which specify the start_date, end_date and other design choices mentioned above. default_args = { 'owner': 'shravan', 'start_date': datetime(2018, 11, 1), 'end_date': datetime(2018, 11, 30), 'depends_on_past': False, 'email_on_retry': False, 'retries': 3, 'retry_delay': timedelta(minutes=5), 'catchup_by_default': False, 'provide_context': True, } How to run this project? Step 1: Create an AWS Redshift Cluster using either the console or the notebook provided in create-redshift-cluster. Run the notebook to create the AWS Redshift Cluster. Make a note of: DWN_ENDPOINT :: dwhcluster.c4m4dhrmsdov.us-west-2.redshift.amazonaws.com DWH_ROLE_ARN :: arn:aws:iam::506140549518:role/dwhRole Step 2: Start Apache Airflow. Run docker-compose up from the directory containing docker-compose.yml. Ensure that you have mapped the volume to point to the location where you have your DAGs. NOTE: You can find details of how to manage Apache Airflow on mac here: https://gist.github.com/shravan-kuchkula/a3f357ff34cf5e3b862f3132fb599cf3 Step 3: Configure Apache Airflow Hooks. On the left is the S3 connection. The Login and password are the IAM user's access key and secret key that you created. Basically, by using these credentials, we are able to read data from S3. On the right is the Redshift connection. These values can be easily gathered from your Redshift cluster connections. Step 4: Execute the create-tables-dag. This DAG will create the staging, fact and dimension tables. The reason we need to trigger this manually is that we want to keep it out of the main DAG. Normally, creation of tables can be handled by just triggering a script. But for the sake of illustration, I created a DAG for this and had Airflow trigger the DAG. You can turn off the DAG once it is completed. After running this DAG, you should see all the tables created in AWS Redshift. Step 5: Turn on the load_and_transform_data_in_redshift dag. As the execution start date is 2018-11-1 with a schedule interval @daily and the execution end date is 2018-11-30, Airflow will automatically trigger and schedule the DAG runs once per day, 30 times in total. Shown below are the 30 DAG runs ranging from start_date till end_date, triggered by Airflow once per day.
AmirAgassi / Danghui LVMDanghui is a Roblox cLVM ACE exploit that uses a relatively brand new script execution method to execute & fully replicate arbitrary code.
Ch-Jad / CH JaDi Rajput1# Cmder [](https://gitter.im/cmderdev/cmder?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [](https://ci.appveyor.com/project/MartiUK/cmder) Cmder is a **software package** created out of pure frustration over absence of usable console emulator on Windows. It is based on [ConEmu](https://conemu.github.io/) with *major* config overhaul, comes with a Monokai color scheme, amazing [clink](https://chrisant996.github.io/clink/) (further enhanced by [clink-completions](https://github.com/vladimir-kotikov/clink-completions)) and a custom prompt layout.  ## Why use it The main advantage of Cmder is portability. It is designed to be totally self-contained with no external dependencies, which makes it great for **USB Sticks** or **cloud storage**. So you can carry your console, aliases and binaries (like wget, curl and git) with you anywhere. The Cmder's user interface is also designed to be more eye pleasing, and you can compare the main differences between Cmder and ConEmu [here](https://conemu.github.io/en/cmder.html). ## Installation ### Single User Portable Config 1. Download the [latest release](https://github.com/cmderdev/cmder/releases/) 2. Extract the archive. *Note: This path should not be `C:\Program Files` or anywhere else that would require Administrator access for modifying configuration files* 3. (optional) Place your own executable files into the `%cmder_root%\bin` folder to be injected into your PATH. 4. Run `Cmder.exe` ### Shared Cmder install with Non-Portable Individual User Config 1. Download the [latest release](https://github.com/cmderdev/cmder/releases/) 2. Extract the archive to a shared location. 3. (optional) Place your own executable files and custom app folders into the `%cmder_root%\bin`. See: [bin/README.md](./bin/Readme.md) - This folder to be injected into your PATH by default. - See `/max_depth [1-5]` in 'Command Line Arguments for `init.bat`' table to add subdirectories recursively. 4. (optional) Place your own custom app folders into the `%cmder_root%\opt`. See: [opt/README.md](./opt/Readme.md) - This folder will NOT be injected into your PATH so you have total control of what gets added. 5. Run `Cmder.exe` with `/C` command line argument. Example: `cmder.exe /C %userprofile%\cmder_config` * This will create the following directory structure if it is missing. ``` c:\users\[CH JaDi Rajput]\cmder_config ├───bin ├───config │ └───profile.d └───opt ``` - (optional) Place your own executable files and custom app folders into `%userprofile%\cmder_config\bin`. - This folder to be injected into your PATH by default. - See `/max_depth [1-5]` in 'Command Line Arguments for `init.bat`' table to add subdirectories recursively. - (optional) Place your own custom app folders into the `%user_profile%\cmder_config\opt`. - This folder will NOT be injected into your PATH so you have total control of what gets added. * Both the shared install and the individual user config locations can contain a full set of init and profile.d scripts enabling shared config with user overrides. See below. ## Cmder.exe Command Line Arguments | Argument | Description | | ------------------- | ----------------------------------------------------------------------- | | `/C [user_root_path]` | Individual user Cmder root folder. Example: `%userprofile%\cmder_config` | | `/M` | Use `conemu-%computername%.xml` for ConEmu settings storage instead of `user_conemu.xml` | | `/REGISTER [ALL, USER]` | Register a Windows Shell Menu shortcut. 
| | `/UNREGISTER [ALL, USER]` | Un-register a Windows Shell Menu shortcut. | | `/SINGLE` | Start Cmder in single mode. | | `/START [start_path]` | Folder path to start in. | | `/TASK [task_name]` | Task to start after launch. | | `/X [ConEmu extras pars]` | Forwards parameters to ConEmu | ## Context Menu Integration So you've experimented with Cmder a little and want to give it a shot in a more permanent home; ### Shortcut to open Cmder in a chosen folder 1. Open a terminal as an Administrator 2. Navigate to the directory you have placed Cmder 3. Execute `.\cmder.exe /REGISTER ALL` _If you get a message "Access Denied" ensure you are executing the command in an **Administrator** prompt._ In a file explorer window right click in or on a directory to see "Cmder Here" in the context menu. ## Keyboard shortcuts ### Tab manipulation * <kbd>Ctrl</kbd> + <kbd>T</kbd> : New tab dialog (maybe you want to open cmd as admin?) * <kbd>Ctrl</kbd> + <kbd>W</kbd> : Close tab * <kbd>Ctrl</kbd> + <kbd>D</kbd> : Close tab (if pressed on empty command) * <kbd>Shift</kbd> + <kbd>Alt</kbd> + <kbd>#Number</kbd> : Fast new tab: <kbd>1</kbd> - CMD, <kbd>2</kbd> - PowerShell * <kbd>Ctrl</kbd> + <kbd>Tab</kbd> : Switch to next tab * <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>Tab</kbd> : Switch to previous tab * <kbd>Ctrl</kbd> + <kbd>#Number</kbd> : Switch to tab #Number * <kbd>Alt</kbd> + <kbd>Enter</kbd>: Fullscreen ### Shell * <kbd>Ctrl</kbd> + <kbd>Alt</kbd> + <kbd>U</kbd> : Traverse up in directory structure (lovely feature!) * <kbd>End</kbd>, <kbd>Home</kbd>, <kbd>Ctrl</kbd> : Traversing text with as usual on Windows * <kbd>Ctrl</kbd> + <kbd>R</kbd> : History search * <kbd>Shift</kbd> + Mouse : Select and copy text from buffer _(Some shortcuts are not yet documented, though they exist - please document them here)_ ## Features ### Access to multiple shells in one window using tabs You can open multiple tabs each containing one of the following shells: | Task | Shell | Description | | ---- | ----- | ----------- | | Cmder | `cmd.exe` | Windows `cmd.exe` shell enhanced with Git, Git aware prompt, Clink (GNU Readline), and Aliases. | | Cmder as Admin | `cmd.exe` | Administrative Windows `cmd.exe` Cmder shell. | | PowerShell | `powershell.exe` | Windows PowerShell enhanced with Git and Git aware prompt . | | PowerShell as Admin | `powershell.exe` | Administrative Windows `powershell.exe` Cmder shell. | | Bash | `bash.exe` | Unix/Linux like bash shell running on Windows. | | Bash as Admin | `bash.exe` | Administrative Unix/Linux like bash shell running on Windows. | | Mintty | `bash.exe` | Unix/Linux like bash shell running on Windows. See below for Mintty configuration differences | | Mintty as Admin | `bash.exe` | Administrative Unix/Linux like bash shell running on Windows. See below for Mintty configuration differences | Cmder, PowerShell, and Bash tabs all run on top of the Windows Console API and work as you might expect in Cmder with access to use ConEmu's color schemes, key bindings and other settings defined in the ConEmu Settings dialog. ⚠ *NOTE:* Only the full edition of Cmder comes with a pre-installed bash, using a vendored [git-for-windows](https://gitforwindows.org/) installation. The pre-configured Bash tabs may not work on Cmder mini edition without additional configuration. 
You may however, choose to use an external installation of bash, such as Microsoft's [Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (called WSL) or the [Cygwin](https://cygwin.com/) project which provides POSIX support on windows. ⚠ *NOTE:* Mintty tabs use a program called 'mintty' as the terminal emulator that is not based on the Windows Console API, rather it's rendered graphically by ConEmu. Mintty differs from the other tabs in that it supports xterm/xterm-256color TERM types, and does not work with ConEmu settings like color schemes and key bindings. As such, some differences in functionality are to be expected, such as Cmder not being able to apply a system-wide configuration to it. As a result mintty specific config is done via the `[%USERPROFILE%|$HOME]/.minttyrc` file. You may read more about Mintty and its config file [here](https://github.com/mintty/mintty). An example of setting Cmder portable terminal colors for mintty: From a bash/mintty shell: ``` cd $CMDER_ROOT/vendor git clone https://github.com/karlin/mintty-colors-solarized.git cd mintty-colors-solarized/ echo source \$CMDER_ROOT/vendor/mintty-colors-solarized/mintty-solarized-dark.sh>>$CMDER_ROOT/config/user_profile.sh ``` You may find some Monokai color schemes for mintty to match Cmder [here](https://github.com/oumu/mintty-color-schemes/blob/master/base16-monokai-mod.minttyrc). ### Changing Cmder Default `cmd.exe` Prompt Config File The default Cmder shell `cmd::Cmder` prompt is customized using `Clink` and is configured by editing a config file that exists in one of two locations: - Single User Portable Config `%CMDER_ROOT%\config\cmder_prompt_config.lua` - Shared Cmder install with Non-Portable Individual User Config `%CMDER_USER_CONFIG%\cmder_prompt_config.lua` If your Cmder setup does not have this file create it from `%CMDER_ROOT%\vendor\cmder_prompt_config.lua.default` Customizations include: - Colors. - Single/Multi-line. - Full path/Folder only. - `[user]@[host]` to the beginning of the prompt. - `~` for home directory. - `λ` symbol Documentation is in the file for each setting. ### Changing Cmder Default `cmd.exe` Shell Startup Behaviour Using Task Arguments 1. Press <kbd>Win</kbd> + <kbd>Alt</kbd> + <kbd>T</kbd> 1. Click either: * `1. {cmd::Cmder as Admin}` * `2. {cmd::Cmder}` 1. Add command line arguments where specified below: *Note: Pay attention to the quotes!* ``` cmd /s /k ""%ConEmuDir%\..\init.bat" [ADD ARGS HERE]" ``` ##### Command Line Arguments for `init.bat` | Argument | Description | Default | | ----------------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | | `/c [user cmder root]` | Enables user bin and config folders for 'Cmder as admin' sessions due to non-shared environment. | not set | | `/d` | Enables debug output. | not set | | `/f` | Enables Cmder Fast Init Mode. This disables some features, see pull request [#1492](https://github.com/cmderdev/cmder/pull/1942) for more details. | not set | | `/t` | Enables Cmder Timed Init Mode. This displays the time taken run init scripts | not set | | `/git_install_root [file path]` | User specified Git installation root path. | `%CMDER_ROOT%\vendor\Git-for-Windows` | | `/home [home folder]` | User specified folder path to set `%HOME%` environment variable. 
| `%userprofile%` | | `/max_depth [1-5]` | Define max recurse depth when adding to the path for `%cmder_root%\bin` and `%cmder_user_bin%` | 1 | | `/nix_tools [0-2]` | Define how `*nix` tools are added to the path. Prefer Windows Tools: 1, Prefer *nix Tools: 2, No `/usr/bin` in `%PATH%`: 0 | 1 | | `/svn_ssh [path to ssh.exe]` | Define `%SVN_SSH%` so we can use git svn with ssh svn repositories. | `%GIT_INSTALL_ROOT%\bin\ssh.exe` | | `/user_aliases [file path]` | File path pointing to user aliases. | `%CMDER_ROOT%\config\user_aliases.cmd` | | `/v` | Enables verbose output. | not set | | (custom arguments) | User defined arguments processed by `cexec`. Type `cexec /?` for more usage. | not set | ### Cmder Shell User Config Single user portable configuration is possible using the cmder specific shell config files. Edit the below files to add your own configuration: | Shell | Cmder Portable User Config | | ------------- | ----------------------------------------- | | Cmder | `%CMDER_ROOT%\config\user_profile.cmd` | | PowerShell | `$ENV:CMDER_ROOT\config\user_profile.ps1` | | Bash/Mintty | `$CMDER_ROOT/config/user_profile.sh` | Note: Bash and Mintty sessions will also source the `$HOME/.bashrc` file if it exists after it sources `$CMDER_ROOT/config/user_profile.sh`. You can write `*.cmd|*.bat`, `*.ps1`, and `*.sh` scripts and just drop them in the `%CMDER_ROOT%\config\profile.d` folder to add startup config to Cmder. | Shell | Cmder `Profile.d` Scripts | | ------------- | -------------------------------------------------- | | Cmder | `%CMDER_ROOT%\config\profile.d\*.bat and *.cmd` | | PowerShell | `$ENV:CMDER_ROOT\config\profile.d\*.ps1` | | Bash/Mintty | `$CMDER_ROOT/config/profile.d/*.sh` | #### Git Status Opt-Out To disable Cmder prompt git status globally add the following to `~/.gitconfig` or locally for a single repo `[repo]/.git/config` and start a new session. *Note: This configuration is not portable* ``` [cmder] status = false # Opt out of Git status for 'ALL' Cmder supported shells. cmdstatus = false # Opt out of Git status for 'Cmd.exe' shells. psstatus = false # Opt out of Git status for 'Powershell.exe and 'Pwsh.exe' shells. shstatus = false # Opt out of Git status for 'bash.exe' shells. ``` ### Aliases #### Cmder(`Cmd.exe`) Aliases You can define simple aliases for `cmd.exe` sessions with a command like `alias name=command`. Cmd.exe aliases support optional parameters through the `$1-9` or the `$*` special characters so the alias `vi=vim.exe $*` typed as `vi [filename]` will open `[filename]` in `vim.exe`. Cmd.exe aliases can also be more complex. See: [DOSKEY.EXE documentation](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/doskey) for additional details on complex aliases/macros for `cmd.exe` Aliases defined using the `alias.bat` command will automatically be saved in the `%CMDER_ROOT%\config\user_aliases.cmd` file To make an alias and/or any other profile settings permanent add it to one of the following: Note: These are loaded in this order by `$CMDER_ROOT/vendor/init.bat`. Anything stored in `%CMDER_ROOT%` will be a portable setting and will follow cmder to another machine. * `%CMDER_ROOT%\config\profile.d\*.cmd` and `\*.bat` * `%CMDER_ROOT%\config\user_aliases.cmd` * `%CMDER_ROOT%\config\user_profile.cmd` #### Bash.exe|Mintty.exe Aliases Bash shells support simple and complex aliases with optional parameters natively so they work a little different. Typing `alias name=command` will create an alias only for the current running session. 
To make an alias and/or any other profile settings permanent add it to one of the following: Note: These are loaded in this order by `$CMDER_ROOT/vendor/git-for-windows/etc/profile.d/cmder.sh`. Anything stored in `$CMDER_ROOT` will be a portable setting and will follow cmder to another machine. * `$CMDER_ROOT/config/profile.d/*.sh` * `$CMDER_ROOT/config/user_profile.sh` * `$HOME/.bashrc` If you add bash aliases to `$CMDER_ROOT/config/user_profile.sh` they will be portable and follow your Cmder folder if you copy it to another machine. `$HOME/.bashrc` defined aliases are not portable. #### PowerShell.exe Aliases PowerShell has native simple alias support, for example `[new-alias | set-alias] alias command`, so complex aliases with optional parameters are not supported in PowerShell sessions. Type `get-help [new-alias|set-alias] -full` for help on PowerShell aliases. To make an alias and/or any other profile settings permanent add it to one of the following: Note: These are loaded in this order by `$ENV:CMDER_ROOT\vendor\user_profile.ps1`. Anything stored in `$ENV:CMDER_ROOT` will be a portable setting and will follow cmder to another machine. * `$ENV:CMDER_ROOT\config\profile.d\*.ps1` * `$ENV:CMDER_ROOT\config\user_profile.ps1` ### SSH Agent To start the vendored SSH agent simply call `start-ssh-agent`, which is in the `vendor/git-for-windows/cmd` folder. If you want to run SSH agent on startup, include the line `@call "%GIT_INSTALL_ROOT%/cmd/start-ssh-agent.cmd"` in `%CMDER_ROOT%/config/user_profile.cmd` (usually just uncomment it). ### Vendored Git Cmder is by default shipped with a vendored Git installation. On each instance of launching Cmder, an attempt is made to locate any other user provided Git binaries. Upon finding a `git.exe` binary, Cmder further compares its version against the vendored one _by executing_ it. The vendored `git.exe` binary is _only_ used when it is more recent than the user-installed one. You may use your favorite version of Git by including its path in the `%PATH%` environment variable. Moreover, the **Mini** edition of Cmder (found on the [downloads page](https://github.com/cmderdev/cmder/releases)) excludes any vendored Git binaries. ### Using external Cygwin/Babun, MSys2, WSL, or Git for Windows SDK with Cmder. You may run bash (the default shell used on Linux, macOS and GNU/Hurd) externally on Cmder, using the following instructions: 1. Setup a new task by pressing <kbd>Win</kbd> +<kbd>Alt</kbd> + <kbd>T</kbd>. 1. Click the `+` button to add a task. 1. Name the new task in the top text box. 1. Provide task parameters, this is optional. 1. Add `cmd /c "[path_to_external_env]\bin\bash --login -i" -new_console` to the `Commands` text box. **Recommended Optional Steps:** Copy the `vendor/cmder_exinit` file to the Cygwin/Babun, MSys2, or Git for Windows SDK environments `/etc/profile.d/` folder to use portable settings in the `$CMDER_ROOT/config` folder. Note: MinGW could work if the init scripts include `profile.d` but this has not been tested. The destination file extension depends on the shell you use in that environment. For example: * bash - Copy to `/etc/profile.d/cmder_exinit.sh` * zsh - Copy to `/etc/profile.d/cmder_exinit.zsh` Uncomment and edit the below line in the script to use Cmder config even when launched from outside Cmder. ``` # CMDER_ROOT=${USERPROFILE}/cmder # This is not required if launched from Cmder. ``` ### Customizing user sessions using `init.bat` custom arguments. 
**Recommended Optional Steps:**

Copy the `vendor/cmder_exinit` file to the Cygwin/Babun, MSys2, or Git for Windows SDK environment's `/etc/profile.d/` folder to use portable settings in the `$CMDER_ROOT/config` folder.

Note: MinGW could work if its init scripts include `profile.d`, but this has not been tested.

The destination file extension depends on the shell you use in that environment. For example:

* bash - Copy to `/etc/profile.d/cmder_exinit.sh`
* zsh - Copy to `/etc/profile.d/cmder_exinit.zsh`

Uncomment and edit the line below in the script to use Cmder config even when launched from outside Cmder.

```
# CMDER_ROOT=${USERPROFILE}/cmder      # This is not required if launched from Cmder.
```

### Customizing user sessions using `init.bat` custom arguments

You can pass custom arguments to `init.bat` and use `cexec.cmd` in your `user_profile.cmd` to evaluate these arguments, then execute commands depending on whether a particular flag is detected. `init.bat` creates two shortcuts for using `cexec.cmd` in your profile scripts.

#### `%ccall%` - Evaluates flags, runs commands if found, then returns to the calling script and continues.

```
ccall=call C:\Users\user\cmderdev\vendor\bin\cexec.cmd
```

Example: `%ccall% /startnotepad start notepad.exe`

#### `%cexec%` - Evaluates flags, runs commands if found, and does not return to the calling script.

```
cexec=C:\Users\user\cmderdev\vendor\bin\cexec.cmd
```

Example: `%cexec% /startnotepad start notepad.exe`

This is useful when you have multiple tasks that launch `cmder` and need the session to be initialized differently depending on the task chosen.

To conditionally start `notepad.exe` when you start a specific `cmder` task:

* Press <kbd>win</kbd>+<kbd>alt</kbd>+<kbd>t</kbd>.
* Click `+` to add a new task.
* Add the below to the `Commands` block:

```batch
cmd.exe /k ""%ConEmuDir%\..\init.bat" /startnotepad"
```

* Add the below to your `%cmder_root%\config\user_profile.cmd`:

```batch
%ccall% "/startNotepad" "start" "notepad.exe"
```

To see detailed usage of `cexec`, type `cexec /?` in cmder.

### Integrating Cmder with [Hyper](https://github.com/zeit/hyper), [Microsoft VS Code](https://code.visualstudio.com/), and your favorite IDEs

Cmder by default comes with a vendored ConEmu installation as the underlying terminal emulator, as stated [here](https://conemu.github.io/en/cmder.html). However, Cmder can in fact run in a variety of other terminal emulators, and even in integrated IDEs. Assuming you have the latest version of Cmder, follow the instructions below to get Cmder working with your own terminal emulator. For instructions on how to integrate Cmder with your IDE, please read our [Wiki section](https://github.com/cmderdev/cmder/wiki#cmder-integration).
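As a rough sketch of what such an integration usually boils down to, the external emulator or IDE is pointed at `cmd.exe` running Cmder's init script. The install path below is an assumption, and the exact setting name differs per tool, so treat this as an illustration rather than the authoritative steps (those are in the Wiki):

```batch
REM Typical shell command an external terminal or IDE would launch.
REM C:\tools\cmder is an assumed install location; adjust it to match your setup.
cmd.exe /k "C:\tools\cmder\vendor\init.bat"
```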
## Upgrading

The process of upgrading Cmder depends on the version/build you are currently running.

If you have a `[cmder_root]/config/user[-|_]conemu.xml`, you are running a newer version of Cmder; follow this process:

1. Exit all Cmder sessions and relaunch `[cmder_root]/cmder.exe`; this backs up your existing `[cmder_root]/vendor/conemu-maximus5/conemu.xml` to `[cmder_root]/config/user[-|_]conemu.xml`.
    * The `[cmder_root]/config/user[-|_]conemu.xml` contains any custom settings you have made using the 'Setup Tasks' settings dialog.
2. Exit all Cmder sessions and back up any files you have manually edited under `[cmder_root]/vendor`.
    * Editing files under `[cmder_root]/vendor` is not recommended since you will need to re-apply these changes after any upgrade. All user customizations should go in the `[cmder_root]/config` folder.
3. Delete the `[cmder_root]/vendor` folder.
4. Extract the new `cmder.zip` or `cmder_mini.zip` into `[cmder_root]/`, overwriting all files when prompted.

If you do not have a `[cmder_root]/config/user[-|_]conemu.xml`, you are running an older version of Cmder; follow this process:

1. Exit all Cmder sessions and back up `[cmder_root]/vendor/conemu-maximus5/conemu.xml` to `[cmder_root]/config/user[-|_]conemu.xml`.
2. Back up any files you have manually edited under `[cmder_root]/vendor`.
    * Editing files under `[cmder_root]/vendor` is not recommended since you will need to re-apply these changes after any upgrade. All user customizations should go in the `[cmder_root]/config` folder.
3. Delete the `[cmder_root]/vendor` folder.
4. Extract the new `cmder.zip` or `cmder_mini.zip` into `[cmder_root]/`, overwriting all files when prompted.

## Current development builds

You can download builds of the current development branch from AppVeyor: [latest artifacts](https://ci.appveyor.com/project/MartiUK/cmder/branch/master/artifacts).

## License

All software included is bundled with its own license.

The MIT License (MIT)

Copyright (c) 2016 Samuel Vasko

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.