syslog.warten.de

Solutions for Tour of Go Exercises

I am learning Go and the Tour of Go, an interactive tutorial, is a good resource to start with. At the end of each section is a series of exercises for the reader to complete.

Unfortunately, there are no example solutions for those exercises, so it's hard to get a hint if you are stuck. Therefore I post my solutions here. I do not think they are the best way to solve the problems in Go (as I said, I am learning too) but the code compiles and does what it should. You are welcome to comment with your solutions and code improvements.

  • Exercise: Loops and Functions

    package main
    
    import (
        "fmt"
        "math"
    )
    
    func Sqrt(x float64) float64 {
        var z, d float64 = x, 1
        for d > 1e-15 {
            z0 := z
            z = z-(z*z-x)/(2*z)
            d = z-z0
            if d < 0 {
                d = -d
            }
        }
        return z
    }
    
    func main() {
        // Compare the hand-rolled Newton's-method result against
        // the standard library for the same input.
        fmt.Println(Sqrt(2))
        fmt.Println(math.Sqrt(2))
    }
    
  • Exercise: Maps

    package main
    
    import (
        "strings"
        "tour/wc"
    )
    
    // WordCount returns how many times each whitespace-separated
    // word occurs in s.
    func WordCount(s string) map[string]int {
        words := strings.Fields(s)
        counts := make(map[string]int, len(words))
        for _, word := range words {
            counts[word] = counts[word] + 1
        }
        return counts
    }
    
    func main() {
        // wc.Test runs the tour's checker against WordCount.
        wc.Test(WordCount)
    }
    
  • Exercise: Slices

    package main
    
    import "tour/pic"
    
    // Pic returns a dx-by-dy matrix of pixel values for the tour's
    // picture viewer; each value is the XOR of its coordinates.
    // (Fix: the inner loop was mis-indented and the file had
    // trailing whitespace -- now gofmt-clean, same behavior.)
    func Pic(dx, dy int) [][]uint8 {
        mat := make([][]uint8, dx)
        for i := range mat {
            mat[i] = make([]uint8, dy)
            for j := range mat[i] {
                mat[i][j] = uint8(i ^ j)
            }
        }
        return mat
    }
    
    func main() {
        // pic.Show renders the matrix returned by Pic.
        pic.Show(Pic)
    }
    
  • Exercise: Fibonacci closure

    package main
    
    import "fmt"
    
    // fibonacci is a function that returns
    // a function that returns an int.
    // fibonacci is a function that returns
    // a function that returns an int.
    // Successive calls yield 0, 1, 1, 2, 3, 5, ...
    // (Fix: the '+' in "sum := a + b" was lost when the post was
    // scraped, leaving the non-compiling "sum:=a b".)
    func fibonacci() func() int {
        var a, b, count int = 0, 1, 0
        return func() int {
            // The first two calls return the seed values directly.
            switch count {
            case 0:
                count++
                return a
            case 1:
                count++
                return b
            }
            sum := a + b
            a = b
            b = sum
            return sum
        }
    }
    
    func main() {
        f := fibonacci()
        // Print the first ten Fibonacci numbers.
        // (Fix: "i++" lost its '+' signs during extraction.)
        for i := 0; i < 10; i++ {
            fmt.Println(f())
        }
    }
    
  • Advanced Exercise: Complex cube roots

    package main
    
    import (
        "fmt"
        "math"
        "cmath"
    )
    
    func Cbrt(x complex128) complex128 {
        var z complex128 = x
        for i:=0; i 1e-15 {
            z0 := z
            z = z-(z*z-f)/(2*z)
            d = z-z0
            if d < 0 {
                d = -d
            }
        }
        return z, nil
    }
    
    func main() {
        // Sqrt of a negative number exercises the error path.
        value, err := Sqrt(-2.0)
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(value)
    }
    
  • Exercise: Images

    package main
    
    import (
        "image"
        "tour/pic"
    )
    
    // Image generates its pixels on the fly instead of storing them.
    // NOTE(review): this uses the pre-Go 1 image API (image.Color,
    // image.RGBAColor, image.RGBAColorModel), which was later moved
    // to the image/color package.
    type Image struct {}
    
    // ColorModel reports the color model used by At.
    func (m *Image) ColorModel() image.ColorModel {
        return image.RGBAColorModel
    }
    
    // Bounds fixes the picture at 256x256 pixels.
    func (m *Image) Bounds() image.Rectangle {
        return image.Rect(0,0,256,256)
    }
    
    // At returns the pixel at (x, y): red and green carry x XOR y,
    // blue and alpha are fully saturated.
    func (m *Image) At(x, y int) image.Color {
        v:=uint8(x^y)
        return image.RGBAColor{v, v, 255, 255}
    }
    
    func main() {
        // Hand the generated image to the tour's viewer.
        m := &Image{}
        pic.ShowImage(m)
    }
    
  • Exercise: Rot13 Reader

    package main
    
    import (
        "io"
        "os"
        "strings"
    )
    
    // rot13Reader wraps an io.Reader and ROT13-decodes every
    // byte read through it.
    type rot13Reader struct {
        r io.Reader
    }
    
    // rot13 applies the ROT13 substitution to b: ASCII letters are
    // rotated 13 places within their case; any other byte is
    // returned unchanged.
    func rot13(b byte) byte {
        switch {
        case b >= 'a' && b <= 'z':
            return 'a' + (b-'a'+13)%26
        case b >= 'A' && b <= 'Z':
            return 'A' + (b-'A'+13)%26
        }
        return b
    }
    
    // Read fills p from the wrapped reader, then ROT13-decodes the
    // bytes that were read, in place. It forwards the inner reader's
    // count and error unchanged (pre-Go 1 os.Error signature).
    func (b *rot13Reader) Read(p []byte) (n int, err os.Error) {
        n, err = b.r.Read(p)
        for i := 0; i < n; i++ {
            p[i] = rot13(p[i])
        }
        return n, err
    }
    
    func main() {
        encoded := strings.NewReader("Lbh penpxrq gur pbqr!")
        decoder := rot13Reader{encoded}
        // Stream the decoded text straight to stdout.
        io.Copy(os.Stdout, &decoder)
    }
    
  • Exercise: Equivalent Binary Trees

    package main
    
    import (
        "fmt"
        "tour/tree"
    )
    
    // Walk walks the tree t sending all values
    // from the tree to the channel ch, smallest first (in-order).
    // The channel is deliberately left open; callers know how many
    // values to expect.
    func Walk(t *tree.Tree, ch chan int) {
        if left := t.Left; left != nil {
            Walk(left, ch)
        }
        ch <- t.Value
        if right := t.Right; right != nil {
            Walk(right, ch)
        }
    }
    
    // Same determines whether the trees
    // t1 and t2 contain the same values.
    // NOTE(review): it compares exactly ten values, assuming the
    // trees come from the tour's tree.New -- confirm for other trees.
    func Same(t1, t2 *tree.Tree) bool {
        ch1, ch2 := make(chan int), make(chan int)
        go Walk(t1, ch1)
        go Walk(t2, ch2)
        for i := 0; i < 10; i++ {
            v1, v2 := <-ch1, <-ch2
            if v1 != v2 {
                return false
            }
        }
        return true
    }
    
    func main() {
        // Stream one tree's values and print them.
        ch := make(chan int)
        go Walk(tree.New(1), ch)
    
        // Ten receives -- matches the ten values a tour tree holds.
        for i:=0; i<10; i++ {
            fmt.Println(<-ch)
        }
    
        // Trees with the same multiplier hold the same values;
        // different multipliers do not.
        fmt.Println("Equivalent Binary Trees?",
            Same(tree.New(1), tree.New(1)))
    
        fmt.Println("Equivalent Binary Trees?",
            Same(tree.New(1), tree.New(2)))
    }
    
  • Exercise: Web Crawler

    package main
    
    import (
        "os"
        "fmt"
    )
    
    // Fetcher abstracts page retrieval so Crawl can be driven by
    // the canned fakeFetcher below (pre-Go 1 os.Error signature).
    type Fetcher interface {
        // Fetch returns the body of URL and
        // a slice of URLs found on that page.
        Fetch(url string) (body string, urls []string, err os.Error)
    }
    
    // Crawl uses fetcher to recursively crawl
    // pages starting with url, to a maximum of depth.
    // Pages are fetched in parallel goroutines; their results come
    // back over the 'more' channel, so 'visited' and 'counter' are
    // only ever touched by this goroutine and need no locking.
    func Crawl(url string, depth int, fetcher Fetcher) {
        // target carries the links found on one page together with
        // the depth at which those links would be fetched.
        type target struct{
            urls  []string
            depth int
        }
        more := make(chan target)
    
        // fetchPage fetches a single page. It always sends exactly
        // one value on 'more' -- even on error -- so the counter
        // below stays balanced.
        fetchPage := func(url string, depth int) {
            body, urls, err := fetcher.Fetch(url)
            if err != nil {
                fmt.Println(err)
            } else {
                fmt.Printf("found: %s %q\n", url, body)
            }
            more <- target{urls, depth+1}
        }
    
        go fetchPage(url, 0)
    
        // counter tracks in-flight fetches; the loop ends only when
        // every spawned goroutine has reported back.
        visited := map[string]bool{url:true}
        for counter := 1; counter > 0; {
            next := <-more
            counter--
            // Links that would exceed the depth limit are dropped.
            if next.depth > depth {
                continue
            }
            for _, url := range next.urls {
                if _, seen := visited[url]; seen {
                    continue
                }
                visited[url] = true
                counter++
                go fetchPage(url, next.depth)
            }
        }
    }
    
    func main() {
        // Crawl the canned fake site to depth 4 from its root.
        Crawl("http://golang.org/", 4, fetcher)
    }
    
    
    // fakeFetcher is Fetcher that returns canned results.
    type fakeFetcher map[string]*fakeResult
    
    // fakeResult holds the canned page body and outgoing links
    // for one URL.
    type fakeResult struct {
        body string
        urls     []string
    }
    
    // Fetch looks url up in the canned result map; unknown URLs
    // produce a "not found" error.
    func (f *fakeFetcher) Fetch(url string) (string, []string, os.Error) {
        res, ok := (*f)[url]
        if !ok {
            return "", nil, fmt.Errorf("not found: %s", url)
        }
        return res.body, res.urls, nil
    }
    
    // fetcher is a populated fakeFetcher.
    // It models a tiny four-page slice of golang.org with links
    // between the pages, including cycles back to the root, so the
    // crawler's visited-set and depth limit are both exercised.
    var fetcher = &fakeFetcher{
        "http://golang.org/": &fakeResult{
            "The Go Programming Language",
            []string{
                "http://golang.org/pkg/",
                "http://golang.org/cmd/",
            },
        },
        "http://golang.org/pkg/": &fakeResult{
            "Packages",
            []string{
                "http://golang.org/",
                "http://golang.org/cmd/",
                "http://golang.org/pkg/fmt/",
                "http://golang.org/pkg/os/",
            },
        },
        "http://golang.org/pkg/fmt/": &fakeResult{
            "Package fmt",
            []string{
                "http://golang.org/",
                "http://golang.org/pkg/",
            },
        },
        "http://golang.org/pkg/os/": &fakeResult{
            "Package os",
            []string{
                "http://golang.org/",
                "http://golang.org/pkg/",
            },
        },
    }
    

363

I cannot open some SSL-encrypted websites (https) in Firefox right now. I have found out that this is caused by the Online Certificate Status Protocol (OCSP) which confirms the current validity of certificates. All those websites are using certificates issued by cacert.org and their OCSP server (ocsp.cacert.org) is currently quite busy (“Too many users” or timeouts).

Quick workaround is to temporarily disable OCSP in Firefox (or other affected web browsers). You can uncheck this feature in Preferences → Advanced → Encryption → Validation in Firefox.

Update: Server seems to be working again.

Significant Improvements to Indexes in MongoDB v2.0

I have upgraded my MongoDB installation to version 2.0 today and converted existing indexes to {v:1} format. According to the release notes, “Indexes are often 25% smaller and 25% faster (depends on the use case)”.

> db.hashes.totalIndexSize()
5932986112
> db.hashes.runCommand( "compact" )
{ "ok" : 1 }
> db.hashes.totalIndexSize()
3527294016

In my case, a more than 10 GB database, the totalIndexSize of {v:1} format indexes is reduced by more than 40% to only 59.45% of the size of the old {v:0} format indexes. Well done, MongoDB developers!

Instant Query Log for MySQL

MySQL has built-in logging for queries (general) and slow queries (slow). You can enable them at startup. It is not recommended to enable general logs on a high-traffic database server for performance reasons but sometimes you may want to be able to see what queries are executed there.

Since mk-query-digest is able to read tcpdumps, it is the perfect tool to create an instant query log. Just pipe all network traffic on MySQL port to mk-query-digest for analysis.

tcpdump -i eth2 port 3306 -s 65535  -x -n -q -tttt | 
  mk-query-digest --type tcpdump

There are many more things you can do with mk-query-digest, so it is worth writing a dump of network traffic to a file and working with it.

tcpdump -i eth2 port 3306 -s 65535  -x -n -q -tttt > tcpdump.out

You can convert it to slow query log format and parse it with your favorite analysis tool.

mk-query-digest --type tcpdump --print --noreport < tcpdump.out > slow.log

The report of an analysis of mk-query-digest fingerprints queries. To search for them in your logfile, you can use the --filter parameter.

mk-query-digest slow.log --no-report --print \
--filter '$event->{fingerprint} && make_checksum($event->{fingerprint}) eq "76A68B0365255C58"'

Logrotate With MongoDB

MongoDB packages are shipped with logging enabled in configuration but without a script to rotate the logfile. There are two built-in ways to let MongoDB rotate its logfile. You can execute db.runCommand("logRotate"); from the mongo shell or kill -SIGUSR1 $(cat /var/lib/mongodb/mongod.lock) from a unix shell.

The best way to automate this is to use logrotate. Copy the following code into /etc/logrotate.d/mongodb and make sure that the paths and filenames in the script correspond with those on your system.

/var/log/mongodb/mongodb.log {
    daily
    rotate 7
    compress
    missingok
    notifempty
    sharedscripts
    postrotate
        /bin/kill -SIGUSR1 `cat /var/lib/mongodb/mongod.lock` && 
        rm -f /var/log/mongodb/mongodb.log.????-??-??T??-??-??
    endscript
}

It rotates the logfile /var/log/mongodb/mongodb.log on a daily basis and keeps the rotated files compressed for 7 days. Since MongoDB’s built-in logRotate conflicts with logrotate, it also cleans up the empty, log-rotated files created by MongoDB.

Solarize Your Terminal!

I have read about Solarized recently. It is a sixteen color palette designed for use with terminal and gui applications. I have been using both the dark and light mode in Gnome Terminal, vim and Netbeans for a few days now and I really like working with it and would not want to miss it.

Perl: Inserting a Lot of Documents in MongoDB

In the last few days, I spent a lot of time figuring out and learning how MongoDB works and what is the best solution to fill a collection with millions of records (documents). To be more precise, I want to store unique strings and some other data related to those strings. One should be able to search for values in all fields, therefore I set indexes for all of them and a unique one for the strings.

Since I want to insert a lot of records, I thought it would be a good idea to use batch_insert (see API Documentation for the Perl driver), which inserts each document in an array into the database. It turned out very quickly that I have to check for duplicate strings because batch_insert breaks immediately when it gets an error from the unique index and does not insert the remaining documents. I ended up with something like the following, which does what it should, but I had a bad feeling and started to profile that code.

# NOTE(review): HTML extraction ate characters from this snippet --
# 'while(<>)' became 'while()' and '$counter++' lost its plus signs.
# Restored here. Also fixed: reset @documents (the snippet reset the
# unrelated @hashes) and pass the array REFERENCE that
# mdb_batch_insert's 'shift' expects.
while (<>) {
    chomp( my $string = $_ );
    next if defined( $mongo_coll->find_one( { plaintext => $string } ) );
    push( @documents, {
        "plaintext" => "$string",
        "foo"       => "bar",
    } );
    if ( $counter++ == $max_inserts ) {
        mdb_batch_insert( \@documents );
        @documents = ();
        $counter = 1;
    }
}
mdb_batch_insert( \@documents );

# mdb_batch_insert sends one batch_insert to MongoDB and logs the
# driver's last_error output unless the command succeeded cleanly.
# NOTE(review): it expects an array REFERENCE (\@documents); passing
# the array itself would flatten into a list and only the first
# hashref would arrive -- confirm the call sites.
sub mdb_batch_insert {
    my $documents_ref = shift;
    $mongo_coll->batch_insert($documents_ref);
    my $err = $mongo_db->last_error();
    unless ( $err->{ok} eq 1 and not defined($err->{err}) ) {
        $logger->error(Dumper($err));
    }
}

For profiling (using Devel::SmallProf) and benchmarking I inserted the same 100000 documents twice into an empty collection. Doing so, I have got results for inserts without collisions with unique index and no inserts or inserts with 100% collisions.

The results pointed out that a lot of time was wasted with checking for duplicates and errors. I started to play around with other ways to insert data and was surprised to see that the easiest approach is also the fastest: Since MongoDB takes automatically care of duplicates, it is possible to just insert each document separately without checking for errors.

# Plain insert per input line, relying on the unique index to drop
# duplicates. NOTE(review): 'while(<>)' restored (angle brackets were
# eaten by HTML extraction) and the interpolated variable fixed --
# $string is not defined in this loop; the line is read into
# $plaintext.
while (<>) {
    chomp( my $plaintext = $_ );
    $mongo_coll->insert({
        "plaintext" => "$plaintext",
        "foo"       => "bar",
    });
}

I used that code to insert the first 23 million documents. After approximately 10 million inserts, I could see that the performance of MongoDB and my script went down. Another problem was that some documents (~5000) were not inserted due to errors I did not check for. What have I learned? That I have to check for errors, and even when it is not a measurable problem in an almost empty database, insert is of course more expensive than find_one.

# Final version: skip strings that already exist, then do a safe
# (acknowledged) insert and log any error that slips through.
# NOTE(review): 'while(<>)' restored after HTML extraction ate the
# angle brackets; indentation normalized.
while (<>) {
    chomp( my $plaintext = $_ );
    next if defined( $mongo_coll->find_one( { plaintext => $plaintext } ) );
    eval {
        $mongo_coll->insert(
            {
                "plaintext" => "$plaintext",
                "foo"       => "bar",
            },
            { safe => 1 }
        );
    };
    $logger->error($@) if $@;
}

At the end, one could say that it would have been obvious that the final solution is the best. Yes, it is how I would have done it with MySQL. But I wanted to become familiar with it and was wondering whether MongoDB would act similar or not. Now I can be sure.