Very low performance of Go code written for Couchbase when inserting 500,000 (5 lakh) records

Can anyone please help me tune the performance of my Couchbase client code so that it runs fast and efficiently? In C++ it takes only 19 seconds to insert 500,000 (5 lakh) records into the same database, but my Go code takes around 19 minutes to complete the same workload.
below are the configuration values
“NumofDoc”:100000,
“ThreadCount”:5,

    package main
    
    import (
        "fmt"
        "gopkg.in/couchbase/gocb.v1"
        "strconv"
        "time"
        //"io/ioutil"
        //"strings"
        "github.com/tkanos/gonfig"
        //"os"
        //"encoding/json"
        "math/rand"
    //    "math"
       )   
        
    // bucket is the shared Couchbase bucket handle, opened once in main
    // and used concurrently by every worker goroutine.
    var  (    
        bucket *gocb.Bucket
         )   
    
    // Insert_doc is the JSON document body written to Couchbase on each
    // insert operation. Exported field names are serialized as-is, so
    // renaming them would change the stored document shape.
    type Insert_doc struct {
        Thread_id int // index of the worker goroutine that wrote the document
        KTAB, SyncBuffer string // KTAB labels the op ("INSERT"); SyncBuffer is the random payload
    } 
    
    // Configuration mirrors the keys of Couchbase_config.json (loaded via
    // gonfig in main). Field names correspond to the JSON keys.
    type Configuration struct {
        NumofDoc  int // number of jobs queued per worker goroutine
        Username   string // cluster username
        Password   string // cluster password
        BucketName string // bucket to open
        ThreadCount int // number of worker goroutines
        Port int // NOTE(review): read from config but not referenced in this file — confirm intended use
        OP_TYPE int // operation mix: 1=insert only, 2=read only, 3=50/50 insert+read
    }
    
    // main loads the benchmark configuration, connects to the Couchbase
    // cluster, fans NumofDoc jobs out to ThreadCount workers, and prints
    // wall-clock start/end timestamps once every job has completed.
    func main() {
        configuration := Configuration{}
        // BUG FIX: the error from GetConf was previously assigned and never
        // checked; a missing/invalid config file silently ran with zero values.
        if err := gonfig.GetConf("Couchbase_config.json", &configuration); err != nil {
            fmt.Println(err.Error())
            return
        }
        fmt.Println("Config File name Passed :  Couchbase_config.json")
        fmt.Println("ThreadCount : ", configuration.ThreadCount)
        fmt.Println("Number of Requests per thread : ", configuration.NumofDoc)

        // NOTE(review): the connection string carries no host — presumably it
        // resolves to localhost; confirm against the deployment.
        cluster, err := gocb.Connect("couchbase://") // Connects to the cluster
        if err != nil {
            fmt.Println(err.Error())
            return
        }
        cluster.Authenticate(gocb.PasswordAuthenticator{
            Username: configuration.Username,
            Password: configuration.Password,
        })
        // BUG FIX: fmt.Println does not interpret %v verbs; use Printf.
        fmt.Printf("Cluster: %v\n", cluster)

        bucket, err = cluster.OpenBucket(configuration.BucketName, "") // Connects to the bucket
        if err != nil {
            fmt.Println(err.Error())
            return
        }
        fmt.Printf("Bucket: %v\n", bucket)

        // One buffered job channel per worker; results is shared by all.
        jobs := make([]chan int, 0, configuration.ThreadCount)
        results := make(chan interface{}, configuration.NumofDoc)
        start := time.Now()
        for i := 0; i < configuration.ThreadCount; i++ {
            workerJobs := make(chan int, configuration.NumofDoc)
            jobs = append(jobs, workerJobs)
            go worker(i, workerJobs, results, configuration.OP_TYPE)
        }
        // Feed each worker its full quota of job numbers, then close its
        // channel so the worker's range loop terminates.
        for _, ch := range jobs { // renamed from the shadowing `jobs` range var
            for j := 1; j <= configuration.NumofDoc; j++ {
                ch <- j
            }
            close(ch)
        }
        // Wait for all workers: one result is sent per completed job.
        for a := 1; a <= configuration.NumofDoc*len(jobs); a++ {
            <-results
        }

        fmt.Printf("Script Starting time : %v \n", start.Unix())
        fmt.Printf("Script Ending time   : %v  \n", time.Now().Unix())
    }
    
        // worker consumes job numbers from jobs and performs one Couchbase
        // operation per job against the global bucket, then prints per-thread
        // statistics.
        //
        //   id      – worker index, embedded in document keys ("Go_Demo_<id>_<n>")
        //   jobs    – job numbers produced by main; closed when the quota is sent
        //   results – exactly one value is sent per completed job so main can wait
        //   s       – OP_TYPE from the config: 1=insert only, 2=read only, 3=50/50
        func worker(id int, jobs <-chan int, results chan<- interface{}, s int) {
            threadStart := time.Now().Unix() // seconds since epoch

            // Build a 100-slot schedule of op codes (1=insert, 2=read)
            // matching the configured update/read mix.
            var updateproportion, readproportion int
            switch s {
            case 1:
                updateproportion, readproportion = 100, 0
            case 2:
                updateproportion, readproportion = 0, 100
            case 3:
                updateproportion, readproportion = 50, 50
            }
            var opsSequence [100]int
            for b := 0; b < updateproportion; b++ {
                opsSequence[b] = 1
            }
            for b := 0; b < readproportion; b++ {
                opsSequence[updateproportion+b] = 2
            }

            var insertCount, readCount int64
            for j := range jobs {
                switch optype := opsSequence[j%100]; optype {
                case 1:
                    document := Insert_doc{Thread_id: id, KTAB: "INSERT", SyncBuffer: RandomString(10000)}
                    key := "Go_Demo_" + strconv.Itoa(id) + "_" + strconv.Itoa(int(insertCount))
                    createDocument(key, &document)
                    insertCount++
                case 2:
                    // Read back keys this worker wrote, cycling through 50k ids.
                    key := "Go_Demo_" + strconv.Itoa(id) + "_" + strconv.Itoa(int(readCount%50000))
                    getDocument(key)
                    readCount++
                default:
                    fmt.Println("Invalid Operation Type ", optype)
                }
                // BUG FIX: the original sent the jobs channel itself into
                // results; send an empty signal value instead (main only
                // counts receives, so this is behavior-compatible).
                results <- struct{}{}
            }

            timediff := time.Now().Unix() - threadStart
            totalOps := insertCount + readCount
            // Guard against division by zero when no jobs were processed.
            if totalOps == 0 {
                fmt.Printf("THREAD_ID %d performed no operations\n", id)
                return
            }
            avgLatency := float64(timediff) / float64(totalOps)
            var opsPerSec float64
            if avgLatency > 0 { // sub-second runs would otherwise yield +Inf
                opsPerSec = 1 / avgLatency
            }
            fmt.Printf("THREAD_ID %d TOTAL WRITE : %d, TOTAL READ : %d, TOTAL OPERATION TIME : %d, AVG_LATENCY = %f S, OPS_PER_SECOND = %f \n", id, insertCount, readCount, timediff, avgLatency, opsPerSec)
        }
    
        // createDocument upserts document under documentId into the global
        // bucket (expiry 0 = no expiry), logging — but not propagating — any
        // error so the benchmark keeps running.
        func createDocument(documentId string, document *Insert_doc) {
            // Use `err` instead of shadowing the builtin `error` type.
            _, err := bucket.Upsert(documentId, document, 0)
            if err != nil {
                fmt.Println(err.Error())
            }
        }
    // RandomString returns a random string of length n drawn from
    // [a-zA-Z0-9], using the package-level math/rand source (deterministic
    // for a fixed seed; calls are serialized by the global source's mutex).
    //
    // Perf: this is called with n=10000 for every insert, so the body avoids
    // the original's per-call []rune alphabet allocation and rune slice —
    // the alphabet is pure ASCII, so a byte buffer produces the identical
    // character sequence with one allocation instead of three.
    func RandomString(n int) string {
        const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        b := make([]byte, n)
        for i := range b {
            b[i] = letters[rand.Intn(len(letters))]
        }
        return string(b)
    }
    // getDocument fetches the document stored under documentId from the
    // global bucket into a throwaway Insert_doc, logging any error. The
    // decoded value is intentionally discarded — this exists only to time
    // the read path of the benchmark.
    func getDocument(documentId string) {
        var doc Insert_doc
        // Use `err` instead of shadowing the builtin `error` type.
        if _, err := bucket.Get(documentId, &doc); err != nil {
            fmt.Println(err.Error())
        }
    }

@sanjum080816 What values are you giving for NumOfDoc and ThreadCount?

 "NumofDoc":100000,
 "ThreadCount":5,

A suggestion would be to not build a full-fledged program initially. Build the code in increments and run it as you develop, so that you will know which code fragment you added is delaying your output. There is no way that the Go code is slower than the C++ code if it's doing the same thing.

Make a copy of your existing file and give this basic code a try (Using wait groups instead of channels):

package main

import (
"fmt"
"sync"
"time"
)

// worker runs one unit of the benchmark; wg.Done is deferred so main's
// Wait unblocks even if the body returns early or panics.
func worker(wg *sync.WaitGroup, /* Other arguments */) {
  
  defer wg.Done()

  // Your worker code
}

// main starts a fixed number of workers and blocks until all have
// finished — no per-job results channel is needed with a WaitGroup.
func main() {

 var wg sync.WaitGroup

 for i := 1; i <= 5; i++ { // Number of threads (5 in your case)
    wg.Add(1) // register the worker BEFORE starting it
    go worker(&wg, /* Other arguments */)
 }

 wg.Wait()
}

Give it a try without concurrency as well: insert 100,000 (1 lakh) records without goroutines and compare the performance.