Blog

  • tf-docker-swarm

    Docker Swarm for AWS (Terraform)

    This is an open Docker Swarm for AWS deployment based on Terraform and custom supporting images.

    Why?

This uses mostly public code and images (a replacement for l4controller is in progress) instead of the “hidden” images Docker uses, and the containers can run on non-musl-libc hosts.
Docker just released Docker for AWS as a fire-and-forget product, with no docs or anything else. While I have no docs yet either, the “how it works” is out in the open, since the code for everything is on GitHub.

There is still some cleanup to do, plus adding Travis CI, tests, etc., but it works as is.

    Project Components

    Terraform:

For the VPC and ELB modules, you can bring your own as long as they comply with the expected inputs and outputs. (I use something based on https://github.com/segmentio/stack that I will publish soon.)

    Images:

    • guide-aws
This container guides the cluster instances through their lifecycle, performing maintenance tasks.

    • meta-aws
Provides a simple Flask-based metadata service; it primarily serves Swarm tokens based on the EC2 instance’s security group (SG). An illustrative sketch follows after this list.

    • init-aws
Initializes the EC2 instance: gets AWS info, inits or joins the Swarm, updates the DynamoDB entry, etc.

    • status-aws
Provides a simple Flask-based status endpoint that reports the Docker Engine status.

    • elb-aws
      Dynamically updates the cluster ELB based on the published services running on the cluster.
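
To illustrate the idea only (this is not the actual meta-aws code; the endpoint name, port, and behavior are assumptions, and the real service additionally authorizes callers by their EC2 security group), a minimal Flask sketch of such a token-serving metadata service could look roughly like this:

from flask import Flask, jsonify
import subprocess

app = Flask(__name__)

@app.route("/token/<role>")
def join_token(role):
    # Ask the local Docker engine for the Swarm join token of the requested role.
    if role not in ("worker", "manager"):
        return jsonify(error="unknown role"), 400
    token = subprocess.check_output(
        ["docker", "swarm", "join-token", "-q", role], text=True
    ).strip()
    return jsonify(token=token)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080)  # port is an assumption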

Documentation for each image can be found in that image’s README.md.

    Development

    Initialize

    After cloning the repo, please run:

    pip install pre-commit # If not installed
    pre-commit install
    

Add your modules to the modules directory and then run Terraform normally.

    Visit original content creator repository
    https://github.com/pecigonzalo/tf-docker-swarm

  • gmock

About

gmock is designed primarily to simplify unit testing.

How It Works

It provides local, in-memory implementations of commonly used services.

Goal

To quickly unit test existing code (integration test is the more accurate term) without changing that code.

Example

    package main
    
    import (
    	"context"
    	"fmt"
    	"github.com/jinzhu/gorm"
    	"github.com/sjqzhang/gmock"
    	"github.com/sjqzhang/gmock/mockdb"
    	"github.com/sjqzhang/gmock/mockdocker"
    	_ "gorm.io/driver/mysql"
    	gormv2 "gorm.io/gorm"
    	"io/ioutil"
    	"net/http"
    	"time"
    	"xorm.io/xorm"
    )
    
    type User struct {
    	Id   int    `json:"id"`
    	Name string `json:"name"`
    	Age  int    `json:"age"`
    }
    
    func main() {
    	testMockGORM()
    	testMockGORMV2()
    	testMockXORM()
    	testMockRedis()
    	testMockHttpServer()
    	testMockDocker()
    	testDBUtil()
    
    }
    
    func testMockGORM() {
    	var db *gorm.DB
    	mockdb.DBType = "mysql"
    	mock := gmock.NewMockGORM("example", func(gorm *mockdb.MockGORM) {
    		db = gorm.GetGormDB()
    	})
    	fmt.Println(mock.GetDSN())
    	//mock.RegisterModels(&User{})
    	mock.InitSchemas(`CREATE TABLE user (
                               id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
                               age int(3) DEFAULT NULL,
                               name varchar(255) DEFAULT NULL COMMENT '名称',
                               PRIMARY KEY (id)
    ) ENGINE=InnoDB ;`)
    	mock.ResetAndInit()
    
    	var user User
    	err := db.Where("id=?", 1).Find(&user).Error
    	if err != nil {
    		panic(err)
    	}
    	if user.Id != 1 {
    		panic(fmt.Errorf("testMockGORM error"))
    	}
    
    }
    
    func testDBUtil() {
    	util := gmock.NewDBUtil()
    	util.RunMySQLServer("test", 33333, false)
    	db, err := gorm.Open("mysql", "user:pass@tcp(127.0.0.1:33333)/test?charset=utf8mb4&parseTime=True&loc=Local")
    	if err != nil {
    		panic(err)
    	}
    	sqlText := util.ReadFile("./example/ddl.txt")
    	for _, s := range util.ParseSQLText(sqlText) {
    		fmt.Println(db.Exec(s))
    	}
    	fmt.Println(util.QueryListBySQL(db.DB(), "select * from project"))
    }
    
    func testMockGORMV2() {
    	mockdb.DBType = "mysql"
    	var db *gormv2.DB
    	mock := gmock.NewMockGORMV2("example", func(orm *mockdb.MockGORMV2) {
    		db = orm.GetGormDB()
    	})
	// Register models
    	mock.RegisterModels(&User{})
	// Initialize the database and table data
    	mock.ResetAndInit()
    	mock.ResetAndInit()
    	//db := mock.GetGormDB()
    	var user User
    	err := db.Where("id=?", 1).Find(&user).Error
    	if err != nil {
    		panic(err)
    	}
    	if user.Id != 1 {
    		panic(fmt.Errorf("testMockGORMV2 error"))
    	}
    
    }
    
    func testMockRedis() {
    	server := gmock.NewMockRedisServer(63790)
    	client := server.GetRedisClient()
    	ctx := context.Background()
    	key := "aa"
    	value := "aa value"
    	pool := server.GetRedigoPool()
    	conn := pool.Get()
    	defer conn.Close()
    	rep, err := conn.Do("set", key, value)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(rep)
    	//client.Set(ctx, key, value, time.Second*10)
    	cmd := client.Get(ctx, key)
    	if cmd.Val() != value {
    		panic("testMockRedis error")
    	}
    
    }
    
    func testMockHttpServer() {
	// Only HTTP is supported, not HTTPS
    	server := gmock.NewMockHttpServer("./", []string{"www.baidu.com", "www.jenkins.org"})
    	server.InitMockHttpServer()
    	//server.SetReqRspHandler(func(req *mockhttp.Request, rsp *mockhttp.Response) {
    	//	req.Method = "GET"
    	//	req.Endpoint = "/HelloWorld"
    	//	req.Host = "www.baidu.com"
    	//	rsp.Body = "xxxxxxxxx bbbb"
    	//})
    	resp, err := http.Get("http://www.baidu.com/hello/xxx")
    	if err != nil {
    		panic(err)
    	}
    	data, err := ioutil.ReadAll(resp.Body)
    	if err != nil {
    		panic(err)
    	}
    	if string(data) != "hello baidu" {
    		panic(fmt.Errorf("testMockHttpServer error"))
    	}
    }
    
    func testMockXORM() {
    	var engine *xorm.Engine
    	mockdb.DBType = "mysql"
    	mock := gmock.NewMockXORM("example", func(orm *mockdb.MockXORM) {
    		engine = orm.GetXORMEngine()
    	})
    	mock.RegisterModels(&User{})
    
    	mock.ResetAndInit()
    	db := mock.GetXORMEngine()
    	var user User
    	_, err := db.Where("id=?", 1).Get(&user)
    	if err != nil {
    		panic(err)
    	}
    	if user.Id != 1 {
    		panic(fmt.Errorf("testMockXORM error"))
    	}
    }
    
    func testMockDocker() {
    	mock := mockdocker.NewMockDockerService()
    	defer mock.Destroy()
    	err := mock.InitContainerWithCmd(func(cmd *string) {
		// NOTE: the container must run detached (in the background); otherwise this call hangs and the program will not continue. Make sure your container keeps running in the background and does not exit.
    		*cmd = "docker run --name some-mysql  -p 3308:3306 -e MYSQL_ROOT_PASSWORD=root -d mysql:5.7"
    	})
    	fmt.Println(err)
    	if !mock.WaitForReady("wget 127.0.0.1:3308 -O -", time.Second*50) {
    		panic(fmt.Errorf("mysql start fail"))
    	}
    	fmt.Println("mysql start success")
    
    }

Generating Coverage and Test Reports

    go test -timeout 0 -covermode=count -coverprofile=coverage.out  -run="^Test" -coverpkg=package1,package2  
    
    go tool cover -html=coverage.out -o coverage.html
    

    Visit original content creator repository
    https://github.com/sjqzhang/gmock

  • orchard-street-wordlists

    Orchard Street Wordlists

Fresh wordlists for all your passphrase-creation needs. Use these wordlists to create strong, secure passphrases, either with dice or with a password generator like those built into some password managers.
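
For illustration only, here is a minimal sketch of generating a passphrase from one of these lists with a cryptographically secure random source (the filename is an assumption; point it at whichever list file you downloaded):

import secrets

# Load a local copy of a wordlist, one entry per line. Taking the last
# whitespace-separated field also works for the dice-numbered variant.
with open("orchard-street-long-list.txt") as f:   # hypothetical filename
    words = [line.split()[-1] for line in f if line.strip()]

# Pick 7 words independently and uniformly at random.
passphrase = " ".join(secrets.choice(words) for _ in range(7))
print(passphrase)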

    • Made up of common words found in (English) Wikipedia and Google Books
    • Uniquely decodable, and thus safe to combine words in passphrases without a delimiter
    • Free of profane words, abbreviations, and British spellings
    • Available in a variety of lengths for different use-cases

    NOTE: These lists are occasionally edited. If you want a static, unchanging copy of any of the word lists, feel free to download the lists as they are currently, download the latest tag/release, or fork this repository at any time. See licensing information below.

    Orchard Street Long List

    The Orchard Street Long List is a 17,576-word list. It provides a hefty 14.1 bits of entropy per word, meaning a 7-word passphrase gives almost 99 bits of entropy.
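
As a quick check of that arithmetic (a sketch, not part of the wordlist tooling), the entropy per word is simply the base-2 logarithm of the list length:

import math

bits_per_word = math.log2(17_576)        # ≈ 14.101 bits of entropy per word
print(round(bits_per_word, 3))           # 14.101
print(round(7 * bits_per_word, 1))       # 98.7 bits for a 7-word passphrase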

    List length               : 17576 words
    Mean word length          : 7.98 characters
    Length of shortest word   : 3 characters (add)
    Length of longest word    : 15 characters (troubleshooting)
    Free of prefix words?     : false
    Uniquely decodable?       : true
    Entropy per word          : 14.101 bits
    Efficiency per character  : 1.767 bits
    Above brute force line?   : true
    Mean edit distance        : 7.915
    
    Word samples
    ------------
    plank billionaire evaluated punched proficiency positioned
    symptom commensurate spit connector misguided royalties
    brokerage losers policy diagram graceful publishing
    successors redesigned companions intrusion alternatives cleaned
    rationalism coupons cosmos clarification translation blaming
    

    Orchard Street Medium List

The Orchard Street Medium List has 8,192 (2^13) words. This length is optimized for binary computers and their random number generators. It gives a nice round 13.00 bits of entropy per word, which makes entropy calculations a bit easier for us humans.

    List length               : 8192 words
    Mean word length          : 7.07 characters
    Length of shortest word   : 3 characters (add)
    Length of longest word    : 10 characters (worthwhile)
    Free of prefix words?     : false
    Uniquely decodable?       : true
    Entropy per word          : 13.000 bits
    Efficiency per character  : 1.839 bits
    Above brute force line?   : true
    Mean edit distance        : 6.966
    
    Word samples
    ------------
    adding pilots maximal website opponent attraction
    dispatched confirms chapter eagle brains arising
    brethren nations palms vaccine relocation basis
    motorway tidal jewelry warn alleged courtesy
    impacts nature gauge quartz provisions exam
    

    This list is used by the Buttercup password manager.

    Orchard Street Diceware List

The Orchard Street Diceware List is our version of the classic Diceware list. With this list, you can use dice to create a secure passphrase (7,776 = 6^5, so each word corresponds to five dice rolls). This list’s 7,776 words give 12.925 bits of entropy per word, the same as the EFF long word list.

    This list is also available without corresponding dice roll numbers prepended.

    List length               : 7776 words
    Mean word length          : 7.05 characters
    Length of shortest word   : 3 characters (add)
    Length of longest word    : 10 characters (worthwhile)
    Free of prefix words?     : false
    Uniquely decodable?       : true
    Entropy per word          : 12.925 bits
    Efficiency per character  : 1.832 bits
    Above brute force line?   : true
    Mean edit distance        : 6.954
    
    Word samples
    ------------
    believing drawing advocate mechanism slaves panel
    lecturer institutes encourages assists rovers injected
    checked liberals thirteen posting frigate mayo
    monitored ruler mean renewal liquid requiring
    polished cardiac injuries challenge coherence legs
    

    This list is an option for users of Strongbox password manager.

    Orchard Street Short Lists

The Orchard Street Alpha and Orchard Street QWERTY lists both have 1,296 words and are optimized for inputting the resulting passphrases into devices like smart TVs or video game consoles. Each word adds 10.34 bits of entropy to a passphrase.

The difference between these two lists is which keyboard layout they are optimized for. Use the Alpha list if your device’s keyboard is laid out alphabetically; use the QWERTY list if it is closer to the QWERTY layout.

    Orchard Street Alpha list

    List length               : 1296 words
    Mean word length          : 4.12 characters
    Length of shortest word   : 3 characters (add)
    Length of longest word    : 7 characters (stopped)
    Free of prefix words?     : false
    Uniquely decodable?       : true
    Entropy per word          : 10.340 bits
    Efficiency per character  : 2.509 bits
    Above brute force line?   : true
    Mean edit distance        : 4.043
    
    Word samples
    ------------
    deity jazz cad bay beg lest
    fees kind fell sell toys shoots
    hints new stops food tell ideas
    toad died must road net feet
    die sold leg done peer tour
    

    Orchard Street QWERTY List

    List length               : 1296 words
    Mean word length          : 4.24 characters
    Length of shortest word   : 3 characters (add)
    Length of longest word    : 8 characters (referred)
    Free of prefix words?     : false
    Uniquely decodable?       : true
    Entropy per word          : 10.340 bits
    Efficiency per character  : 2.441 bits
    Above brute force line?   : true
    Mean edit distance        : 4.170
    
    Word samples
    ------------
    pine mod polo egg three whip
    zen ties cadet wars sweat tier
    unity jam tire egg idea hull
    sent kiss open fife reader will
    mute mecca drugs rent turn den
    

    For more information on these short lists and their specific use-cases, see this GitHub repo and/or this blog post.

    Online passphrase generator that uses some of these wordlists.

    You can use StrongPhrase.net to generate passphrases from the Long and QWERTY lists. Source code available on GitLab.

    FAQ

    Check our FAQ for answers to frequently asked questions.

    Licensing

    Creative Commons License
    This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.

    Sources of words and other legal notes

    The words contained in these word lists are primarily taken from two sources: Google Books Ngram data (2012 data) and Wikipedia, via a Wikipedia word frequency project, taken in June 2023.

This project has no association with Google, Wikipedia, or the creators of the Wikipedia word frequency project cited above. To my knowledge, neither Google, Wikipedia, nor the creators of that project endorse this project.

At the time the words were pulled from Wikipedia, Wikipedia text was licensed under the Creative Commons Attribution-ShareAlike 4.0 International License (“CC BY-SA 4.0”), and thus I am using that license for this project.

    Visit original content creator repository https://github.com/sts10/orchard-street-wordlists
  • ProblemSolving

    ProblemSolving

    A repository documenting my daily problem solving to prepare for Google interviews 😅

Many of the problems are solved simply as daily practice. The UVa problems, however, are listed in the book Competitive Programming 3 by Steven & Felix Halim, and I’m solving them while reading the book.

I’m also writing documentation for the different algorithms and data structures used: Documentation link.

    Solved problems from Google Kick Start :

    Problem name Solutions Year Round
    Transform The String Java 2021 H
    Centauri Prime Java 2022 session 1
    Sample Problem Java 2022 session 1

    Solved problems from UVa Online Judge :

    Problem name Solutions Tags Book section
    146 – ID Codes Java Next permutation, Reverse, Swap, Array 2.2. Linear DS with Built-in Libraries
    272. TEX Quotes Java Ad hoc, String 1.3.3 Time to Start the Journey
    230 – Borrowers Java HashMap, ArrayList, Sort 2.2. Linear DS with Built-in Libraries
    278 – Chess Java Ad hoc, Chess 1.4 The Ad Hoc Problems
    462 – Bridge Hand Evaluator Java Ad hoc, Simulation, Cards 1.4 The Ad Hoc Problems
    489 – Hangman Judge Java Ad hoc, Games 1.4 The Ad Hoc Problems
    573 – The Snail Java Ad hoc 1.3.3 Time to Start the Journey
    665 – False coin Java Array 2.2. Linear DS with Built-in Libraries
    696 – How Many Knights Java Ad hoc, Chess 1.4 The Ad Hoc Problems
    732 – Anagrams by Stack Java Stack, LinkedList, Backtracking, Recursive 2.2. Linear DS with Built-in Libraries
    790 – Head Judge Headache Java Sort, Array 2.2. Linear DS with Built-in Libraries
    939 – Genes Java BST, TreeMap 2.3 Non-Linear DS with Built-in Libraries
    1124 – Celebrity jeopardy Java Ad hoc 1.3.3 Time to Start the Journey
    10038 – Jolly Jumpers Java Array 2.2. Linear DS with Built-in Libraries
    10107 – What is the Median? Java Array, Median, QuickSelect 2.2. Linear DS with Built-in Libraries
    10114 – Loansome Car Buyer Java Ad hoc, Simulation 1.3.3 Time to Start the Journey
    10141 – Request for Proposal Java Ad hoc 1.3.3 Time to Start the Journey
    10189 – Minesweeper Java Ad hoc, Games 1.4 The Ad Hoc Problems
    10258 – Contest Scoreboard Java List, Sort 2.2. Linear DS with Built-in Libraries
    10264 – The Most Potent Corner Java Bitmask 2.2. Linear DS with Built-in Libraries
    10284 – Chessboard in FEN Java Ad hoc, Chess 1.4 The Ad Hoc Problems
    10550 – Combination Lock Java Ad hoc, Simulation 1.3.3 Time to Start the Journey
    10646 – What is the Card? Java Ad hoc, Cards 1.4 The Ad Hoc Problems
    10855 – Rotated square Java Matrix, Rotation 2.2. Linear DS with Built-in Libraries
    10920 – Spiral Tap Java Simulation, Matrix 2.2. Linear DS with Built-in Libraries
    11044 – Searching for Nessy Java Ad hoc, Math 1.3.3 Time to Start the Journey
    11093 – Just Finish it up Java Array 2.2. Linear DS with Built-in Libraries
    11172 – Relational Operator Java Ad hoc, Comparison 1.3.3 Time to Start the Journey
    11173 – Grey Codes Java Bitmask 2.2. Linear DS with Built-in Libraries
    11340 – Newspaper Java Array, Hashing, Large input 2.2. Linear DS with Built-in Libraries
    11364 – Parking Java Ad hoc, Math 1.3.3 Time to Start the Journey
    11498 – Division of Nlogonia Java Ad hoc, Coordinates 1.3.3 Time to Start the Journey
    11507 – Bender B. Rodriguez Problem Java Ad hoc 1.3.3 Time to Start the Journey
    11547 – Automatic Answer Java Ad hoc, Math 1.3.3 Time to Start the Journey
    11559 – Event Planning Java Ad hoc, Simulation 1.3.3 Time to Start the Journey
    11581 – Grid Successors Java Matrix, Graph 2.2. Linear DS with Built-in Libraries
    11727 – Cost Cutting Java Ad hoc, Sort 1.3.3 Time to Start the Journey
    11799 – Horror Dash Java Ad hoc 1.3.3 Time to Start the Journey
    11926 – Multitasking Java BitSet 2.2. Linear DS with Built-in Libraries
    11933 – Splitting Numbers Java Bitmask, Least significant bit 2.2. Linear DS with Built-in Libraries
    11988 – Broken Keyboard (a.k.a. Beiju Text) Java LinkedList 2.2. Linear DS with Built-in Libraries
    12150 – Pole Position Java Array 2.2. Linear DS with Built-in Libraries
    12187 – Brothers Java Matrix 2.2. Linear DS with Built-in Libraries
    12207 – That is Your Queue Java Queue, Deque 2.2. Linear DS with Built-in Libraries
    12247 – Jollo Java Ad hoc, Cards 1.4 The Ad Hoc Problems
    12250 – Language Detection Java Ad hoc 1.3.3 Time to Start the Journey
    12279 – Emoogle Balance Java Ad hoc 1.3.3 Time to Start the Journey
    12289 – One-Two-Three Java Ad hoc 1.3.3 Time to Start the Journey
    12291 – Polyomino Composer Java Matrix 2.2. Linear DS with Built-in Libraries
    12356 – Army Buddies Java Array, Buffers, Linked list 2.2. Linear DS with Built-in Libraries
    12372 – Packing for Holiday Java Ad hoc 1.3.3 Time to Start the Journey
    12398 – NumPuzz I Java Matrix 2.2. Linear DS with Built-in Libraries
    12541 – Birthdates Java Sort 2.2. Linear DS with Built-in Libraries

    Solved problems from leetcode.com :

    Problem name Solutions Tags
    1. Two Sum Java Array, Hash Table
    2. Add Two Numbers Java Linked List, Math, Recursion
    3. Longest Substring Without Repeating Characters Java Hash Table, String, Sliding Window
    4. Median of Two Sorted Arrays Java Array, Binary Search Divide and Conquer
    5. Longest Palindromic Substring Java String, Dynamic Programming
    6. Zigzag Conversion Java String
    7. Reverse Integer Java Math
    8. String to Integer (atoi) Java String
    9. Palindrome Number Java Math
    10. Regular Expression Matching Java String, Dynamic Programming, Recursion
    13. Roman to Integer Java Hash Table, Math, String
    14. Longest Common Prefix Java String
    20. Valid Parentheses Java String, Stack
    21. Merge Two Sorted Lists Java Linked List, Recursion
    23. Merge k Sorted Lists Java Linked List, Divide And Conquer
    24. Swap Nodes in Pairs Java Linked List, Recursion
    39. Combination Sum Java Array, Backtracking
    61. Rotate List Java Linked List, Two Pointers
    71. Simplify Path Java String, Stack
    78. Subsets Java Array, Backtracking, Bit Manipulation
    80. Remove Duplicates from Sorted Array II Java Array, Two Pointers
    82. Remove Duplicates from Sorted List II Java Linked List, Two Pointers
    84. Largest Rectangle in Histogram Java Array, Stack, Monotonic Stack
    104. Maximum Depth of Binary Tree Java Tree, Depth-First Search
    121. Best Time to Buy and Sell Stock Java Array, Dynamic Programming
    127. Word Ladder Java Hash Table, String, Breadth-First Search
    133. Clone Graph Java Hash Table, Depth-First Search, Graph, Breadth-First Search
    134. Gas Station Java Array, Greedy
    136. Single Number Java Array, Bit Manipulation
    138. Copy List with Random Pointer Java Hash Table, Linked List
    141. Linked List Cycle Java Hash Table, Linked List, Two Pointers
    148. Sort List Java Linked List, Two Pointers, Sorting, Divide And Conquer, Merge Sort
    165. Compare Version Numbers Java String, Two Pointers
    169. Majority Element Java Array, Hash Table, Divide And Conquer, Sorting, Counting
    171. Excel Sheet Column Number Java String, Math
    189. Rotate Array Java Array, Math, Two Pointers
    211. Design Add and Search Words Data Structure Java String, Depth-First Search, Design, Tree
    228. Summary Ranges Java Array
    258. Add Digits Java Math, Simulation, Number Theory
    316. Remove Duplicate Letters Java String, Stack, Greedy, Monotonic Stack
    338. Counting Bits Java Dynamic Programming, Bit Manipulation
    389. Find the Difference Java Hash Table, String, Bit Manipulation
    392. Is Subsequence Java Two Pointers, String, Dynamic Programming
    402. Remove K Digits Java String, Stack, Greedy, Monotonic Stack
    413. Arithmetic Slices Java Array, Dynamic Programming
    421. Maximum XOR of Two Numbers in an Array Java Array, Hash Table, Bit Manipulation, Binary Tree
    438. Find All Anagrams in a String Java Hash Table, String, Sliding Window
    454. 4Sum II Java Hash Table, Arrays
    520. Detect Capital Java String
    525. Contiguous Array Java Array, Hash Table, Prefix Sum
    532. K-diff Pairs in an Array Java Array, Hash Table, Two Pointers
    560. Subarray Sum Equals K Java Array, Hash Table, Prefix Sum
    567. Permutation in String Java Hash Table, Two Pointers, String
    662. Maximum Width of Binary Tree Java Tree, Depth-First Search, Breadth-First Search, Binary Tree
    740. Delete and Earn Java Array, Hash Table, Dynamic Programming
    799. Champagne Tower Java Dynamic Programming
    847. Shortest Path Visiting All Nodes Java Dynamic Programming, Bit Manipulation, Breadth-First Search, Graph, Bitmask
    856. Score of Parentheses Java String, Stack
    895. Maximum Frequency Stack Java Hash Table, Stack, Design, Ordered Set
    941. Valid Mountain Array Java Array
    946. Validate Stack Sequences Java Array, Stack, Simulation
    1249. Minimum Remove to Make Valid Parentheses Java String, Stack
    1288. Remove Covered Intervals Java Array, Sorting
    1291. Sequential Digits Java Enumeration
    1305. All Elements in Two Binary Search Trees Java Binary Search Tree, Depth-First Search, Inorder Traversal
    1359. Count All Valid Pickup and Delivery Options Java Dynamic Programming, Math, Combinatorics
    1510. Stone Game IV Java Math, Dynamic Programming, Game Theory
    1672. Richest Customer Wealth Java Array, Matrix
    1675. Minimize Deviation in Array Java Array, Greedy, Heap, Priority Queue


    Visit original content creator repository
    https://github.com/touir1/ProblemSolving

  • gitexpo

    Gitexpo

    A new Github code explorer

    Directories

api: Queries for the GraphQL API
apollo: Configuration of Apollo Client and connection to the Apollo Server
components: Components used in the project
config: URLs and ports used in the project
constants: Constant values and action types
contexts: Providers for the Context API
css: Styles and fonts
helpers: Helper functions
options: All options used for the dropdown and selectbox components
pages: Pages of the site
public: Images, GIFs, and static files
reducers: Reducers for the Context API

    Structures

    Main file address: app/index.tsx

    How to Use:

    yarn install && yarn start

    Runs the app in the development mode.
    Open http://localhost:3000 to view it in the browser.

    The page will reload if you make edits.
    You will also see any lint errors in the console.

    yarn build

    Builds the app for production to the build folder.
    It correctly bundles React in production mode and optimizes the build for the best performance.

    The build is minified and the filenames include the hashes.
    Your app is ready to be deployed!

    See the section about deployment for more information.

    Testing

    yarn test

    Launches the test runner in the interactive watch mode.
    See the section about running tests for more information.

    Visit original content creator repository
    https://github.com/MattRoseDev/gitexpo

  • SwiftSummarize

    SwiftSummarize

    SwiftSummarize is the easiest way to create a summary from a String. Internally it’s a simple wrapper around CoreServices SKSummary

    Before

    Here’s to the crazy ones. The misfits. The rebels. The troublemakers. The round pegs in the square holes. The ones who see things differently. They’re not fond of rules. And they have no respect for the status quo. You can quote them, disagree with them, glorify or vilify them. About the only thing you can’t do is ignore them. Because they change things. They push the human race forward. And while some may see them as the crazy ones, we see genius. Because the people who are crazy enough to think they can change the world, are the ones who do.

    After

    Because the people who are crazy enough to think they can change the world, are the ones who do

    Install

    Add this url to your dependencies:

    https://github.com/StefKors/SwiftSummarize
    

    Example

    let input = """
    Here's to the crazy ones. The misfits. The rebels. The troublemakers. The
    round pegs in the square holes. The ones who see things differently. They're not
    fond of rules. And they have no respect for the status quo. You can quote them,
    disagree with them, glorify or vilify them. About the only thing you can't do is ignore
    them. Because they change things. They push the human race forward. And while some
    may see them as the crazy ones, we see genius. Because the people who are crazy
    enough to think they can change the world, are the ones who do.  
    """
    
let summary = Summary(input, numberOfSentences: 1)
    
    print(summary.output)
    // Because the people who are crazy enough to think they can change the world, are the ones who do

    Or use it directly on Strings with the extension

    let input = """
    Here's to the crazy ones. The misfits. The rebels. The troublemakers. The
    round pegs in the square holes. The ones who see things differently. They're not
    fond of rules. And they have no respect for the status quo. You can quote them,
    disagree with them, glorify or vilify them. About the only thing you can't do is ignore
    them. Because they change things. They push the human race forward. And while some
    may see them as the crazy ones, we see genius. Because the people who are crazy
    enough to think they can change the world, are the ones who do.  
    """
    
    let output = input.summarize(numberOfSentences: 1)
    
    print(output)
    // Because the people who are crazy enough to think they can change the world, are the ones who do

    A full SwiftUI code example can be found at /Example/ExampleSwiftUI.swift

    preview

    Visit original content creator repository https://github.com/StefKors/SwiftSummarize
  • bundle-hazelcast-3n4n5-app-pado

    PadoGrid PadoGrid | Catalogs | Manual | FAQ | Releases | Templates | Pods | Kubernetes | Docker | Apps | Quick Start


    PadoGrid 1.x Host OS Docker

    App: Pado

    The pado app provides a Hazelcast Portable class generator and CSV file import tools for Hazelcast. This bundle includes step-by-step instructions for ingesting mock data and UCI Machine Learning datasets into Hazelcast. It also includes a Pado scheduler demo that automates scheduled job executions for exporting and importing data from databases.

    Installing Bundle

    install_bundle -download bundle-hazelcast-3n4n5-app-pado

    ❗️ The Pado scheduler currently does not support Cygwin.

    Use Case

    This use case introduces Pado for ingesting CSV file contents in the form of VersionedPortable objects into a Hazelcast cluster.

Pado CSV Import Flow

    Bundle Contents

    apps
    └── pado
    
    docker
    └── mysql

    Building Pado

    cd_app pado/bin_sh
    ./build_app

    The build_app script builds and deploys Pado in the pado app directory. You can check the directory contents as follows, where <version> is the Pado version.

    ls ../pado_<version>

    Pado CSV data Directory

The Pado CSV data directory structure includes the import directory, where you place the CSV files to import, and the schema directory, in which you provide schema files that define how to parse the CSV files. Pado automatically moves successfully imported files from the import directory to the processed directory; unsuccessful ones are moved to the error directory.

    data
    ├── error
    ├── import
    ├── processed
    └── schema

    Running Pado CSV Importer

    The Pado CSV importer facility automatically generates schema files, generates and compiles VersionedPortable classes, and imports CSV file contents into Hazelcast in the form of VersionedPortable objects. The imported data can then be viewed using the desktop app. These steps are shown in sequence below.

    1. Place CSV files in the data/import/ directory.
    2. Generate schema files using the CSV files in data/import/.
    3. Generate VersionedPortable source code.
    4. Compile and create a VersionedPortable jar file.
    5. Deploy the generated jar file to a Hazelcast cluster and add the Portable factory class in hazelcast.xml.
    6. Start a Hazelcast cluster.
    7. Import CSV files.
    8. View imported data using the desktop app.

    NW Demo

    For our demo, let’s import the NW sample data included in the Pado distribution into Hazelcast. To import data in CSV files, you need to first generate schema files. Pado provides the generate_schema command which auto-generates schema files based on CSV file contents. Once you have schema files ready, then you can generate Hazelcast VersionedPortable classes by executing the generate_versioned_portable command.

    1. Create a Hazelcast cluster.

    For our demo, we will use the default cluster, myhz, which can be created as follows.

    create_cluster -product hazelcast -cluster myhz
2. Change directory to the pado directory and copy the NW CSV files to the import directory.
    cd_app pado
    cd pado_<version>
    
    # Copy CSV files into data/import
    cp -r data/nw/import data/
3. Generate schema files.

    Generate schema files for the nw data

    # Generate schema files. The following command generates schema files in the
    # data/schema/generated directory.
    cd_app pado
    cd pado_<version>
    cd bin_sh/hazelcast
    ./generate_schema
    
    # Move the generated schema files to data/schema.
    mv ../../data/schema/generated/* ../../data/schema/

    ❗️ If generate_schema fails due to a Java path issue, then you can set JAVA_HOME in the setenv.sh file as shown below.

    # pado_<version>/bin_sh/setenv.sh
    vi ../setenv.sh
4. Generate VersionedPortable source code. The following command reads the schema files located in data/schema/ and generates the corresponding VersionedPortable Java source code.
    # Generate VersionedPortable classes with the factory ID of 30000 and the
    # start class ID of 30000.
    ./generate_versioned_portable  -fid 30000 -cid 30000
5. Compile and create jar file.
    ./compile_generated_code
6. Deploy the generated jar file to a Hazelcast cluster and add the Portable factory class ID in hazelcast.xml.
    # Copy the jar file to the padogrid workspace plugins directory
    cp ../../dropins/generated.jar $PADOGRID_WORKSPACE/plugins/
    
    # Add the Portable factory class ID in hazelcast.xml
    switch_cluster myhz
    
    # In hazelcast.xml, add the serialization configuration outputted by
# the generate_versioned_portable command in step 4.
    vi etc/hazelcast.xml

Find the <serialization> element in etc/hazelcast.xml and add the <portable-factory> element shown below.

                 <serialization>
                     ...
                     <portable-factories>
                         ...
                         <portable-factory factory-id="30000">
                              org.hazelcast.data.PortableFactoryImpl
                         </portable-factory>
                         ...
                     </portable-factories>
                 </serialization>
7. Start Hazelcast cluster
    start_cluster
8. Import CSV files.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_csv
9. View imported data using the desktop app.

    If you have not installed HazelcastDesktop then install it now as follows.

    install_padogrid -product hazelcast-desktop 
    update_padogrid -product hazelcast-desktop

    Create and update a HazelcastDesktop app as follows.

    # Create a HazelcastDesktop app
    create_app -product hazelcast -app desktop
    
    # Change directory to desktop
    cd_app desktop
    
    # Edit etc/pado.properties
    vi etc/pado.properties

    Enter serialization configuration in pado.properties:

    hazelcast.client.config.serialization.portable.factories=1:org.hazelcast.demo.nw.data.PortableFactoryImpl,\
    10000:org.hazelcast.addon.hql.impl.PortableFactoryImpl,\
    30000:org.hazelcast.data.PortableFactoryImpl

    Run desktop

    cd bin_sh
    ./desktop

    WSL Users

If you are using WSL without an X Server, set the correct Windows JAVA_HOME path and run ‘desktop.bat’ as follows.

    # Change directory where the Windows scripts are located.
    cd bin_win
    
# Set Windows JAVA_HOME path. If it's already set globally then you do not need to
    # set it again in setenv.bat
    vi setenv.bat
    
    # Run desktop from WSL
    cmd.exe /c desktop.bat

    Dataset Examples

    The following links provide Pado instructions for ingesting downloadable datasets.

    Scheduler Demo

    Pado includes an ETL scheduler that automates exporting data from databases and importing them into Hazelcast clusters. You create and schedule jobs in JSON to periodically export data from any databases via JDBC. Each job defines the required JDBC connectivity and driver information and one or more grid paths (map names) with their query strings and scheduled time information.

    Once you have created jobs, you can run them immediately without having the scheduler enabled. This allows you to quickly test your configurations but more importantly, generate the required schema files. You would generate the schema files in the same way as you did in the NW Demo section. The default scheduler directory is data/scheduler and has the same hierarchy as the CSV data directory described previously in the Pado CSV data Directory section.

    data/scheduler
    ├── error
    ├── import
    ├── processed
    └── schema

    To run the scheduler demo, you need read/write access to a database. For our demo, we will be using the MySQL Docker container.

1. Run the MySQL and Adminer containers using Docker Compose.
    cd_docker mysql
    docker compose up

The MySQL root account is set up as follows:

    Parameter Value
    Adminer URL http://localhost:8080
    MySQL User root
    MySQL Password rootpw
    MySQL Port 3306
2. Create the nw database using Adminer.
    • Login to MySQL from Adminer URL
    • Select SQL command from Adminer
    • Execute the following:
    create database nw;
3. Ingest data into MySQL using the perf_test app.
    # Create perf_test_mysql
    create_app -product hazelcast -name perf_test_mysql
    
    # Edit hibernate.cfg-mysql.xml
    cd_app perf_test_mysql
    vi etc/hibernate.cfg-mysql.xml

    Enter the MySQL root password in hibernate.cfg-mysql.xml:

            <property name="connection.username">root</property>
            <property name="connection.password">rootpw</property>

    Install the MySQL JDBC driver by building the app.

    cd bin_sh
    ./build_app

    Ingest data into MySQL.

    ./test_group -db -run -prop ../etc/group-factory.properties
4. To use the Pado scheduler, you need to encrypt the password as follows. Copy the encrypted password; we will insert it in the job file in step 6.
    cd_app pado
    cd pado_<version>/bin_sh/tools
    ./encryptor
5. Copy the scheduler template directory and create jobs that dump database tables to CSV files.
    # Copy the entire template scheduler directory
    cd_app pado
    cd pado_<version>
    cp -r data/template/scheduler data/
    
    # IMPORTANT: Remove the files that came with the template. We don't need them.
    rm data/scheduler/etc/*
    rm data/scheduler/schema/*

    Create the mysql.json file.

    cd data/scheduler/etc
    vi mysql.json
    
6. Enter query information in the mysql.json file as shown below. Copy and paste the encrypted database password into the file. Set the GridId attribute to the Hazelcast cluster name. Set the Path attributes to the map names.
    {
            "Driver": "com.mysql.cj.jdbc.Driver",
            "Url": "jdbc:mysql://localhost:3306/nw?allowPublicKeyRetrieval=true&serverTimezone=America/New_York",
            "User": "root",
            "Password": "<paste the encrypted password here>",
            "Delimiter": ",",
            "Null": "'\\N'",
            "GridId": "myhz",
            "Paths": [
                    {
                            "Path": "nw/customers",
                            "Columns": "customerId, address, city, companyName, contactName, contactTitle, country, fax, phone, postalCode, region",
                            "Query": "select * from nw.customers",
                            "Day": "Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday",                                "Time": "00:00:00"
                    },
                    {
                            "Path": "nw/orders",
                            "Columns": "orderId, customerId, employeeId, freight, orderDate, requiredDate, shipAddress, shipCity, shipCountry, shipName, shipPostalCode, shipRegion, shipVia, shippedDate",
                            "Query": "select * from nw.orders",
                            "Day": "Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday",                                "Time": "00:00:00, 01:00:00, 02:00:00, 03:00:00, 04:00:00, 05:00:00, 06:00:00, 07:00:00, 08:00:00, 09:00:00, 10:00:00, 11:00:00, 12:00:00, 13:00:00, 14:00:00, 15:00:00, 16:00:00, 17:00:00, 18:00:00, 19:00:00, 20:00:00, 21:00:00, 22:00:00, 23:00:00"
                    }
            ]
    }

Note that serverTimezone is set to America/New_York in the JDBC URL. Without it, you may see the following exception if your MySQL uses the system timezone and is unable to calculate the dates due to the leap year.

    com.mysql.cj.exceptions.WrongArgumentException: HOUR_OF_DAY: 2 -> 3

We have configured two (2) jobs in the mysql.json file. The first job downloads the customers table every midnight and the second job downloads the orders table every hour. We could have configured more practical queries, such as downloading just the last hour’s worth of orders. For demo purposes, let’s keep it simple and fluid. Our main goal is to ingest the database data into Hazelcast.

7. We need to create the schema files for properly reading and transforming CSV file contents into Hazelcast objects. We can manually create the schema files or simply generate them. To generate the schema files, we need CSV files. This is done by executing the import_scheduler -now command, which generates CSV files in the default directory, data/scheduler/import, without scheduling the jobs.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_scheduler -now
8. Generate schema files using the downloaded data files.
    ./generate_schema -schemaDir data/scheduler/schema -dataDir data/scheduler/import -package org.hazelcast.data.demo.nw
9. Generate the corresponding VersionedPortable source code in the default directory, src/generated.
    ./generate_versioned_portable -schemaDir data/scheduler/schema -fid 20000 -cid 20000
10. Compile the generated code and deploy the generated jar file to the workspace plugins directory so that it will be included in the cluster class path.
    ./compile_generated_code
    cp ../../dropins/generated.jar $PADOGRID_WORKSPACE/plugins/
11. Configure Hazelcast with the generated PortableFactoryImpl class.
    cd_cluster
    vi etc/hazelcast.xml

    Enter the following in hazelcast.xml:

        <serialization>
            <portable-factories>
                <portable-factory factory-id="20000">
                     org.hazelcast.data.demo.nw.PortableFactoryImpl
                </portable-factory>
            </portable-factories>
        </serialization>
12. Restart cluster.
    stop_cluster
    start_cluster
13. Import CSV file contents to the cluster.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_scheduler -import
14. Run Desktop
    cd_app desktop
    
    # Edit etc/pado.properties
    vi etc/pado.properties

    Enter serialization configuration in pado.properties:

    hazelcast.client.config.serialization.portable.factories=1:org.hazelcast.demo.nw.data.PortableFactoryImpl,\
    10000:org.hazelcast.addon.hql.impl.PortableFactoryImpl,\
    30000:org.hazelcast.data.PortableFactoryImpl,\
    20000:org.hazelcast.data.demo.nw.PortableFactoryImpl
    cd bin_sh
    ./desktop

    Teardown

    # Exit Hazelcast Desktop
    
    # Stop Hazelcast cluster
    stop_cluster
    
    # Stop MySQL container
    cd_docker mysql
    docker compose down

    About Pado

    Pado is authored by Dae Song Park (email:dspark@netcrest.com) to bring linear scalability to IMDG for storing Big Data. His architecture achieves this by logically federating data grids and providing an abstract API layer that not only hides the complexity of the underlying IMDG API but introduces new Big Data capabilities that IMDG products lack today. He coined the terms grids within grid and grid of grids to illustrate his architecture which spans in-memory data across a massive number of clusters with a universal namespace similar to URL for easy data access.

    Pado for Geode 1.x and GemFire 9.x is part of PadoGrid and installed by running install_padogrid -product pado.

    Pado for GemFire 8.x is available from GitHub.

    The PadoGrid project borrows many architecture and script ideas from Pado.

    References

    1. Pado, Grid of Grids, https://github.com/netcrest/pado
    2. UCI Machine Learning Repository Datasets

    PadoGrid PadoGrid | Catalogs | Manual | FAQ | Releases | Templates | Pods | Kubernetes | Docker | Apps | Quick Start

    Visit original content creator repository https://github.com/padogrid/bundle-hazelcast-3n4n5-app-pado
  • bundle-hazelcast-3n4n5-app-pado

    PadoGrid PadoGrid | Catalogs | Manual | FAQ | Releases | Templates | Pods | Kubernetes | Docker | Apps | Quick Start


    PadoGrid 1.x Host OS Docker

    App: Pado

    The pado app provides a Hazelcast Portable class generator and CSV file import tools for Hazelcast. This bundle includes step-by-step instructions for ingesting mock data and UCI Machine Learning datasets into Hazelcast. It also includes a Pado scheduler demo that automates scheduled job executions for exporting and importing data from databases.

    Installing Bundle

    install_bundle -download bundle-hazelcast-3n4n5-app-pado

    ❗️ The Pado scheduler currently does not support Cygwin.

    Use Case

    This use case introduces Pado for ingesting CSV file contents in the form of VersionedPortable objects into a Hazelcast cluster.

    Pado CVS Import Flow

    Bundle Contents

    apps
    └── pado
    
    docker
    └── mysql

    Building Pado

    cd_app pado/bin_sh
    ./build_app

    The build_app script builds and deploys Pado in the pado app directory. You can check the directory contents as follows, where <version> is the Pado version.

    ls ../pado_<version>

    Pado CSV data Directory

    The Pado CSV data directory structure includes the import directory where you place the CSV files to import and the schema directory in which you provide schema files that define how to parse the CSV files. Pado automatically moves the successfully imported files from the import directory to the processed directory. It moves the unsuccessful ones in the error directory.

    data
    ├── error
    ├── import
    ├── processed
    └── schema

    Running Pado CSV Importer

    The Pado CSV importer facility automatically generates schema files, generates and compiles VersionedPortable classes, and imports CSV file contents into Hazelcast in the form of VersionedPortable objects. The imported data can then be viewed using the desktop app. These steps are shown in sequence below.

    1. Place CSV files in the data/import/ directory.
    2. Generate schema files using the CSV files in data/import/.
    3. Generate VersionedPortable source code.
    4. Compile and create a VersionedPortable jar file.
    5. Deploy the generated jar file to a Hazelcast cluster and add the Portable factory class in hazelcast.xml.
    6. Start a Hazelcast cluster.
    7. Import CSV files.
    8. View imported data using the desktop app.

    NW Demo

    For our demo, let’s import the NW sample data included in the Pado distribution into Hazelcast. To import data in CSV files, you need to first generate schema files. Pado provides the generate_schema command which auto-generates schema files based on CSV file contents. Once you have schema files ready, then you can generate Hazelcast VersionedPortable classes by executing the generate_versioned_portable command.

    1. Create a Hazelcast cluster.

    For our demo, we will use the default cluster, myhz, which can be created as follows.

    create_cluster -product hazelcast -cluster myhz
    1. Change directory to the pado directory and copy the NW CSV files to the import directory.
    cd_app pado
    cd pado_<version>
    
    # Copy CSV files into data/import
    cp -r data/nw/import data/
    1. Generate schema files.

    Generate schema files for the nw data

    # Generate schema files. The following command generates schema files in the
    # data/schema/generated directory.
    cd_app pado
    cd pado_<version>
    cd bin_sh/hazelcast
    ./generate_schema
    
    # Move the generated schema files to data/schema.
    mv ../../data/schema/generated/* ../../data/schema/

    ❗️ If generate_schema fails due to a Java path issue, then you can set JAVA_HOME in the setenv.sh file as shown below.

    # pado_<version>/bin_sh/setenv.sh
    vi ../setenv.sh
    1. Generate VersionedPortable source code. The following command reads schema files located in data/schema/ and generates the corresponding `VersionedPortable Java source code.
    # Generate VersionedPortable classes with the factory ID of 30000 and the
    # start class ID of 30000.
    ./generate_versioned_portable  -fid 30000 -cid 30000
    1. Compile and create jar file.
    ./compile_generated_code
    1. Deploy the generated jar file to a Hazelcast cluster and add the Portable factory class ID in hazelcast.xml.
    # Copy the jar file to the padogrid workspace plugins directory
    cp ../../dropins/generated.jar $PADOGRID_WORKSPACE/plugins/
    
    # Add the Portable factory class ID in hazelcast.xml
    switch_cluster myhz
    
    # In hazelcast.xml, add the serialization configuration outputted by
    # the generate_versioned_portable command in step 3.
    vi etc/hazelcast.xml

    Find the <serialization> element in ect/hazelast.xml and add the <portable-factory> element shown below.

                 <serialization>
                     ...
                     <portable-factories>
                         ...
                         <portable-factory factory-id="30000">
                              org.hazelcast.data.PortableFactoryImpl
                         </portable-factory>
                         ...
                     </portable-factories>
                 </serialization>
    1. Start Hazelcast cluster
    start_cluster
    1. Import CSV files.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_csv
    1. View imported data using the desktop app.

    If you have not installed HazelcastDesktop then install it now as follows.

    install_padogrid -product hazelcast-desktop 
    update_padogrid -product hazelcast-desktop

    Create and update a HazelcastDesktop app as follows.

    # Create a HazelcastDesktop app
    create_app -product hazelcast -app desktop
    
    # Change directory to desktop
    cd_app desktop
    
    # Edit etc/pado.properties
    vi etc/pado.properties

    Enter serialization configuration in pado.properties:

    hazelcast.client.config.serialization.portable.factories=1:org.hazelcast.demo.nw.data.PortableFactoryImpl,\
    10000:org.hazelcast.addon.hql.impl.PortableFactoryImpl,\
    30000:org.hazelcast.data.PortableFactoryImpl

    Run desktop

    cd bin_sh
    ./desktop

    WSL Users

    If you are using WSL without the X Server then set the correct Windows JAVA_HOME path run ‘desktop.bat’ as follows.

    # Change directory where the Windows scripts are located.
    cd bin_win
    
    # Set Windows JAVA_HOME path. If it's already set gobally then you do not need to
    # set it again in setenv.bat
    vi setenv.bat
    
    # Run desktop from WSL
    cmd.exe /c desktop.bat

    Dataset Examples

    The following links provide Pado instructions for ingesting downloadable datasets.

    Scheduler Demo

    Pado includes an ETL scheduler that automates exporting data from databases and importing them into Hazelcast clusters. You create and schedule jobs in JSON to periodically export data from any databases via JDBC. Each job defines the required JDBC connectivity and driver information and one or more grid paths (map names) with their query strings and scheduled time information.

    Once you have created jobs, you can run them immediately without having the scheduler enabled. This allows you to quickly test your configurations but more importantly, generate the required schema files. You would generate the schema files in the same way as you did in the NW Demo section. The default scheduler directory is data/scheduler and has the same hierarchy as the CSV data directory described previously in the Pado CSV data Directory section.

    data/scheduler
    ├── error
    ├── import
    ├── processed
    └── schema

    To run the scheduler demo, you need read/write access to a database. For our demo, we will be using the MySQL Docker container.

    1. Run MySQL and Adminer container using docker compose.
    cd_docker mysql
    docker compose up

    MySQL root account is setup as follows:

    Parameter Value
    Adminer URL http://localhost:8080
    MySQL User root
    MySQL Password rootpw
    MySQL Port 3306
    1. Create the nw database using Adminer.
    • Login to MySQL from Adminer URL
    • Select SQL command from Adminer
    • Execute the following:
    create database nw;
    1. Ingest data into MySQL using the perf_test app.
    # Create perf_test_mysql
    create_app -product hazelcast -name perf_test_mysql
    
    # Edit hibernate.cfg-mysql.xml
    cd_app perf_test_mysql
    vi etc/hibernate.cfg-mysql.xml

    Enter the MySQL root password in hibernate.cfg-mysql.xml:

            <property name="connection.username">root</property>
            <property name="connection.password">rootpw</property>

    Install the MySQL JDBC driver by building the app.

    cd bin_sh
    ./build_app

    Ingest data into MySQL.

    ./test_group -db -run -prop ../etc/group-factory.properties
    1. To use the Pado scheduler, you need to encrypt the password as follows. Copy the encrypted password, which we will insert in the job file in step 6.
    cd_app pado
    cd pado_<version>/bin_sh/tools
    ./encryptor
    1. Copy the scheduler template directory and create jobs that dump database tables to CSV files.
    # Copy the entire template scheduler directory
    cd_app pado
    cd pado_<version>
    cp -r data/template/scheduler data/
    
    # IMPORTANT: Remove the files that came with the template. We don't need them.
    rm data/scheduler/etc/*
    rm data/scheduler/schema/*

    Create the mysql.json file.

    cd data/scheduler/etc
    vi mysql.json
    
    1. Enter query information in the mysql.json file as shown below. Copy/paste the encrypted database password in the file. Set the GridId attribute to the Hazelcast cluster name. Set the Path attributes to the map names.
    {
            "Driver": "com.mysql.cj.jdbc.Driver",
            "Url": "jdbc:mysql://localhost:3306/nw?allowPublicKeyRetrieval=true&serverTimezone=America/New_York",
            "User": "root",
            "Password": "<paste the encrypted password here>",
            "Delimiter": ",",
            "Null": "'\\N'",
            "GridId": "myhz",
            "Paths": [
                    {
                            "Path": "nw/customers",
                            "Columns": "customerId, address, city, companyName, contactName, contactTitle, country, fax, phone, postalCode, region",
                            "Query": "select * from nw.customers",
                            "Day": "Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday",                                "Time": "00:00:00"
                    },
                    {
                            "Path": "nw/orders",
                            "Columns": "orderId, customerId, employeeId, freight, orderDate, requiredDate, shipAddress, shipCity, shipCountry, shipName, shipPostalCode, shipRegion, shipVia, shippedDate",
                            "Query": "select * from nw.orders",
                            "Day": "Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday",                                "Time": "00:00:00, 01:00:00, 02:00:00, 03:00:00, 04:00:00, 05:00:00, 06:00:00, 07:00:00, 08:00:00, 09:00:00, 10:00:00, 11:00:00, 12:00:00, 13:00:00, 14:00:00, 15:00:00, 16:00:00, 17:00:00, 18:00:00, 19:00:00, 20:00:00, 21:00:00, 22:00:00, 23:00:00"
                    }
            ]
    }

    Note that serverTimezone is set to America/New_York for the JDBC URL. Without it, you may see the following exception if your MySQL uses the system timezone and unable to calculate the dates due to the leap year.

    com.mysql.cj.exceptions.WrongArgumentException: HOUR_OF_DAY: 2 -> 3

    We have configured two (2) jobs in the mysql.json file. The first job downloads the customers table every midnight and the second job downloads the orders table every hour. We could have configured with more practical queries like downloading just the last hour’s worth of orders, for example. For the demo purpose, let’s keep it simple and fluid. Our main goal is to ingest the database data into Hazelcast.

    1. We need to create the schema files for properly reading and transforming CSV file contents to Hazelcast objects. We can manually create the schema files or simply generate them. To generate the schema files, we need CSV files. This is done by executing the import_scheduler -now command which generates CSV files without scheduling the jobs in the default directory, data/scheduler/import.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_scheduler -now
    1. Generate schema files using the downloaded data files.
    ./generate_schema -schemaDir data/scheduler/schema -dataDir data/scheduler/import -package org.hazelcast.data.demo.nw
    1. Generate the corresponding VersionedPortable source code in the default directory, src/generated.
    ./generate_versioned_portable -schemaDir data/scheduler/schema -fid 20000 -cid 20000
    1. Compile the generated code and deploy the generated jar file to the workspace plugins directory so that it will be included in the cluster class path.
    ./compile_generated_code
    cp ../../dropins/generated.jar $PADOGRID_WORKSPACE/plugins/
    1. Configure Hazelcast with the generated PortableFactoryImpl class.
    cd_cluster
    vi etc/hazelcast.xml

    Enter the following in hazelcast.xml:

        <serialization>
            <portable-factories>
                <portable-factory factory-id="20000">
                     org.hazelcast.data.demo.nw.PortableFactoryImpl
                </portable-factory>
            </portable-factories>
        </serialization>
    1. Restart cluster.
    stop_cluster
    start_cluster
    1. Import CSV file contents to the cluster.
    cd_app pado
    cd pado_<version>/bin_sh/hazelcast
    ./import_scheduler -import
    1. Run Desktop
    cd_app desktop
    
    # Edit etc/pado.properties
    vi etc/pado.properties

    Enter serialization configuration in pado.properties:

    hazelcast.client.config.serialization.portable.factories=1:org.hazelcast.demo.nw.data.PortableFactoryImpl,\
    10000:org.hazelcast.addon.hql.impl.PortableFactoryImpl,\
    30000:org.hazelcast.data.PortableFactoryImpl,\
    20000:org.hazelcast.data.demo.nw.PortableFactoryImpl
    cd bin_sh
    ./desktop
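
    Optionally, you can sanity-check the ingested maps from code. The sketch below is not part of the bundle; it is a minimal illustration that uses the Hazelcast Go client to connect to the cluster and print the entry counts of the two maps (entry counts are computed on the members, so the client does not need the generated portable factory just to call Size). It assumes a Hazelcast 4 or 5 cluster reachable at localhost:5701 and a cluster name matching the GridId in mysql.json; adjust both for your environment.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/hazelcast/hazelcast-go-client"
    )

    func main() {
        ctx := context.Background()

        // Cluster name follows the GridId used in mysql.json; the address
        // assumes a local member listening on the default port.
        config := hazelcast.Config{}
        config.Cluster.Name = "myhz"
        config.Cluster.Network.SetAddresses("localhost:5701")

        client, err := hazelcast.StartNewClientWithConfig(ctx, config)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Shutdown(ctx)

        // Print the entry count of each ingested map.
        for _, name := range []string{"nw/customers", "nw/orders"} {
            m, err := client.GetMap(ctx, name)
            if err != nil {
                log.Fatal(err)
            }
            size, err := m.Size(ctx)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s: %d entries\n", name, size)
        }
    }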

    Teardown

    # Exit Hazelcast Desktop
    
    # Stop Hazelcast cluster
    stop_cluster
    
    # Stop MySQL container
    cd_docker mysql
    docker compose down

    About Pado

    Pado is authored by Dae Song Park (email:dspark@netcrest.com) to bring linear scalability to IMDGs for storing Big Data. His architecture achieves this by logically federating data grids and providing an abstract API layer that not only hides the complexity of the underlying IMDG API but also introduces new Big Data capabilities that IMDG products lack today. He coined the terms grids within grid and grid of grids to describe this architecture, which spans in-memory data across a massive number of clusters with a universal namespace, similar to a URL, for easy data access.

    Pado for Geode 1.x and GemFire 9.x is part of PadoGrid and is installed by running install_padogrid -product pado.

    Pado for GemFire 8.x is available from GitHub.

    The PadoGrid project borrows many architecture and script ideas from Pado.

    References

    1. Pado, Grid of Grids, https://github.com/netcrest/pado
    2. UCI Machine Learning Repository Datasets, https://archive.ics.uci.edu


    Visit original content creator repository https://github.com/padogrid/bundle-hazelcast-3n4n5-app-pado
  • internet_time

    Internet Time Calculator 2.0

    This C program calculates Swatch Internet Time, a revolutionary concept that could have changed how people measure time. <insert sarcasm flag here> In this alternate reality where Internet Time became the global standard, this tool would be essential for daily time management!

    Internet Time divides the day into 1000 ‘beats’, abolishing time zones and providing a universal time for everyone.

    Features

    • Real-time Beat Calculation: Current beat (@) based on Internet Time
    • Advanced Time Conversion: Convert beats back to standard time
    • Multiple Output Formats: Customizable display formats
    • Timezone Support: Handle different timezone offsets (-12 to +14 hours)
    • Watch Mode: Continuous real-time updates
    • Verbose Mode: Detailed time information and context
    • Internet Date Display: Show dates in Internet Time format
    • Local Time Support: Use system local time instead of UTC
    • Portable & Lightweight: Minimal dependencies, runs everywhere
    • Perfect Integration: Works seamlessly with tmux, status bars, and scripts

    Installation

    Prerequisites

    • C compiler (GCC, Clang, or similar)
    • Make (optional, for easier building)

    Quick Build

    git clone <repository-url>
    cd internet_time
    make

    Development Build (with debug symbols)

    make debug

    System Installation

    make install         # Install to /usr/local/bin (requires sudo)
    # or
    PREFIX=$HOME/.local make install  # Install to user directory

    Usage

    Basic Usage

    # Current Internet Time
    ./internet_time
    # Output: @347.22
    
    # With timezone offset (+3 hours)
    ./internet_time -t 3
    # Output: @472.45
    
    # Using local time
    ./internet_time -l
    # Output: @123.78

    Advanced Features

    # Convert beats to standard time
    ./internet_time -b 500
    # Output: @500.00 = 12:00:00 BMT (Biel Mean Time)
    
    # Verbose output with details
    ./internet_time -v
    # Output: Detailed time breakdown with context
    
    # Show Internet date
    ./internet_time -d
    # Output: Internet Date: 2024.215 (Year 2024, Day 215)
    
    # Watch mode (updates every second)
    ./internet_time -w
    # Output: Continuous real-time updates
    
    # Custom format (zero-padded integer)
    ./internet_time -f '@%04.0f'
    # Output: @0347

    Practical Examples

    # Status bar integration
    ./internet_time -f '%04.0f'  # Clean format for bars
    
    # Time zone conversion
    ./internet_time -t -5        # Eastern Standard Time
    ./internet_time -t 9         # Japan Standard Time
    
    # Business meeting scheduler
    ./internet_time -v           # Get full context for scheduling

    Command Line Options

    Option Description Example
    -t <offset> Timezone offset in hours (-12 to +14) -t 2
    -f <format> Custom output format -f '@%04.0f'
    -l Use local time instead of UTC -l
    -b <beats> Convert beats to standard time -b 500
    -d Show Internet date format -d
    -v Verbose output with details -v
    -w Watch mode (continuous updates) -w
    -h Show help -h

    Format Specifiers

    • %f – Float beats (e.g., 347.22)
    • %d – Integer beats (e.g., 347)
    • %3d – Padded integer beats (e.g., 347)
    • %04d – Zero-padded integer beats (e.g., 0347)

    Integration Examples

    tmux Status Bar

    Add to your .tmux.conf:

    set-option -ag status-right ' #[fg=cyan,bg=default]@#(internet_time -f "%.0f")'

    Bash Prompt

    Add to your .bashrc:

    export PS1='[\u@\h \W @$(internet_time -f "%.0f")] \$ '

    Shell Script Integration

    #!/bin/bash
    current_beat=$(internet_time -f "%.0f")
    if [ "$current_beat" -lt 500 ]; then
        echo "Good morning! It's @$current_beat"
    else
        echo "Good evening! It's @$current_beat"
    fi

    About Internet Time

    In this alternate reality where Internet Time became the global standard:

    • No Time Zones: Universal time for all
    • 1000 Beats per Day: Each beat = 1 minute 26.4 seconds (see the calculation sketch after this list)
    • BMT Reference: Biel Mean Time (UTC+1) as the base
    • Beat Periods:
      • 0-249: Morning beats
      • 250-499: Afternoon beats
      • 500-749: Evening beats
      • 750-999: Night beats
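
    The beat arithmetic is easy to reproduce. The tool in this repo is written in C; the snippet below is just a minimal Go sketch of the same formula: take the wall-clock time in BMT (UTC+1, no daylight saving), count the seconds since midnight, and divide by 86.4 seconds per beat. The inverse multiplication is what the -b option reports.

    package main

    import (
        "fmt"
        "time"
    )

    // Biel Mean Time is a fixed UTC+1 offset with no daylight saving.
    var bmt = time.FixedZone("BMT", 3600)

    // beats returns the Swatch Internet Time for t: seconds elapsed since
    // midnight BMT divided by 86.4 (86,400 seconds per day / 1000 beats).
    func beats(t time.Time) float64 {
        tb := t.In(bmt)
        secs := tb.Hour()*3600 + tb.Minute()*60 + tb.Second()
        return float64(secs) / 86.4
    }

    // beatsToClock converts a beat value back to a BMT wall-clock time,
    // which is what the -b option prints.
    func beatsToClock(b float64) (h, m, s int) {
        secs := int(b * 86.4)
        return secs / 3600, (secs % 3600) / 60, secs % 60
    }

    func main() {
        fmt.Printf("@%.2f\n", beats(time.Now()))

        h, m, s := beatsToClock(500) // @500 -> 12:00:00 BMT
        fmt.Printf("@500 = %02d:%02d:%02d BMT\n", h, m, s)
    }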

    Development

    Building & Testing

    make clean && make    # Clean build
    make test            # Run basic tests
    make debug           # Debug version
    make format          # Format code

    Contributing

    1. Fork the repository
    2. Create a feature branch
    3. Make your changes
    4. Test thoroughly
    5. Submit a pull request

    License

    BSD 3-Clause License – see LICENSE file for details.

    Bug Reports

    Report bugs to: crg@crg.eti.br


    In a world where Internet Time ruled supreme, this would be an essential tool!


    Visit original content creator repository
    https://github.com/crgimenes/internet_time

  • html2pdf-go

    html2pdf-go

    A simple Go package for converting HTML content to PDF using the chromedp package under the hood. The package supports concurrent PDF generation through a worker pool system.

    Features

    • Concurrent HTML to PDF conversion using Chrome/Chromium
    • Configurable worker pool size
    • Customizable PDF output settings (page size, margins, etc.)
    • Support for waiting for page load and animations
    • Automatic retry mechanism for failed conversions
    • Debug logging option
    • Resource cleanup with graceful shutdown

    Installation

    go get github.com/xarunoba/html2pdf-go

    Ensure you have Chrome or Chromium installed on your system.

    Usage

    Basic Usage

    package main
    
    import (
        "context"
        "log"
        "os"
    
        "github.com/xarunoba/html2pdf-go"
    )
    
    func main() {
        // Create a new converter with default options
        converter, err := html2pdf.New()
        if err != nil {
            log.Fatal(err)
        }
        defer converter.Close()
    
        // HTML content to convert
        html := `
            <!DOCTYPE html>
            <html>
                <body>
                    <h1>Hello, World!</h1>
                </body>
            </html>
        `
    
        // Convert HTML to PDF
        ctx := context.Background()
        pdf, err := converter.Convert(ctx, html)
        if err != nil {
            log.Fatal(err)
        }
    
        // Save the PDF
        if err := os.WriteFile("output.pdf", pdf, 0644); err != nil {
            log.Fatal(err)
        }
    }

    For more usage examples, please check the examples directory.

    Configuration Options

    Converter Options

    Option Description Default
    WithWorkers Number of concurrent Chrome tabs 1
    WithTimeout Maximum time for conversion 60 seconds
    WithRetryAttempts Number of retry attempts 3
    WithRetryDelay Delay between retries 1 second
    WithDebug Enable debug logging false

    PDF Options

    Option Description Default
    WithPaperSize Page width and height (inches) 8.5 x 11
    WithMargins Page margins (inches) 0.5 all sides
    WithWaitForLoad Wait for page to load true
    WithWaitForAnimations Animation completion wait time 500ms
    WithWaitTimeout Maximum wait time 60s
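
    The worker pool is what makes concurrent conversion practical: each worker owns a Chrome tab, so several Convert calls can render at once without spawning an unbounded number of tabs. The sketch below shows that usage; it assumes the converter options from the table are passed to New in the usual functional-options style, so double-check the exact signatures against the examples directory.

    package main

    import (
        "context"
        "fmt"
        "log"
        "os"
        "sync"

        "github.com/xarunoba/html2pdf-go"
    )

    func main() {
        // Assumed functional-options style; WithWorkers comes from the
        // Converter Options table above.
        converter, err := html2pdf.New(html2pdf.WithWorkers(4))
        if err != nil {
            log.Fatal(err)
        }
        defer converter.Close()

        pages := map[string]string{
            "a.pdf": "<html><body><h1>Page A</h1></body></html>",
            "b.pdf": "<html><body><h1>Page B</h1></body></html>",
            "c.pdf": "<html><body><h1>Page C</h1></body></html>",
        }

        ctx := context.Background()
        var wg sync.WaitGroup
        for name, html := range pages {
            wg.Add(1)
            go func(name, html string) {
                defer wg.Done()
                // The pool caps how many tabs render at the same time.
                pdf, err := converter.Convert(ctx, html)
                if err != nil {
                    log.Printf("%s: %v", name, err)
                    return
                }
                if err := os.WriteFile(name, pdf, 0644); err != nil {
                    log.Printf("%s: %v", name, err)
                    return
                }
                fmt.Println("wrote", name)
            }(name, html)
        }
        wg.Wait()
    }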

    Considerations

    • This package uses Chrome/Chromium for PDF generation, which provides excellent CSS support and rendering accuracy but may not be the fastest solution for high-volume processing.
    • Memory usage scales with the number of workers, as each worker maintains a Chrome tab.
    • Performance can vary based on system resources, document complexity, and whether the HTML contains external resources.
    • For production environments, consider running benchmarks to determine optimal worker pool size for your use case.
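
    One way to act on the last point is a small Go benchmark that renders a fixed batch of documents at several pool sizes and lets go test report the timings. As in the sketch above, the WithWorkers option and its placement on New are assumptions; adjust to the actual API.

    package html2pdf_test

    import (
        "context"
        "fmt"
        "sync"
        "testing"

        "github.com/xarunoba/html2pdf-go"
    )

    const page = "<html><body><h1>bench</h1></body></html>"

    // Run with: go test -bench=WorkerPool -benchtime=3x
    func BenchmarkWorkerPool(b *testing.B) {
        for _, workers := range []int{1, 2, 4, 8} {
            b.Run(fmt.Sprintf("workers=%d", workers), func(b *testing.B) {
                converter, err := html2pdf.New(html2pdf.WithWorkers(workers))
                if err != nil {
                    b.Fatal(err)
                }
                defer converter.Close()

                ctx := context.Background()
                for i := 0; i < b.N; i++ {
                    // Render a batch of 16 documents concurrently.
                    var wg sync.WaitGroup
                    for j := 0; j < 16; j++ {
                        wg.Add(1)
                        go func() {
                            defer wg.Done()
                            if _, err := converter.Convert(ctx, page); err != nil {
                                b.Error(err)
                            }
                        }()
                    }
                    wg.Wait()
                }
            })
        }
    }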

    Requirements

    • Go 1.23 or later
    • Chrome or Chromium browser installed on the system

    Visit original content creator repository
    https://github.com/xarunoba/html2pdf-go