package main
import (
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)
// User is a row from the users table, shaped for JSON output via the
// struct tags below.
type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
	// Email is a pointer so a NULL database column scans as nil
	// (and marshals as JSON null) instead of failing or defaulting to "".
	Email *string `json:"email"`
}
// main connects to a local PostgreSQL database, fetches the user with
// id 1, prints the email (handling a NULL column), and prints the
// record serialized as JSON.
func main() {
	// NOTE(review): DSN is hard-coded; consider reading it from the
	// environment so credentials are not baked into the binary.
	connStr := "user=username dbname=mydb sslmode=disable"

	// sql.Open only validates the DSN; no connection is made yet.
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Query a single user by primary key. $1 is a PostgreSQL
	// placeholder, so the value is passed safely (no SQL injection).
	var user User
	row := db.QueryRow("SELECT id, name, email FROM users WHERE id = $1", 1)
	if err := row.Scan(&user.ID, &user.Name, &user.Email); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Bug fix: the original fell through after this branch and
			// printed a zero-value user; return early instead.
			fmt.Println("No user found with that ID")
			return
		}
		log.Fatal(err)
	}

	// Email is *string, so a NULL column arrives as nil.
	if user.Email != nil {
		fmt.Printf("User Email: %s\n", *user.Email)
	} else {
		fmt.Println("User Email: NULL")
	}

	// Serialize the fetched record to JSON and print it.
	userJSON, err := json.Marshal(user)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(userJSON))
}
How do I avoid rehashing overhead with std::unordered_set in multithreaded code?
How do I find elements with custom comparators with std::set for embedded targets?
How do I erase elements while iterating with std::set for embedded targets?
How do I provide stable iteration order with std::unordered_map for large datasets?
How do I reserve capacity ahead of time with std::unordered_map for large datasets?
How do I erase elements while iterating with std::unordered_map in multithreaded code?
How do I provide stable iteration order with std::map for embedded targets?
How do I provide stable iteration order with std::map in multithreaded code?
How do I avoid rehashing overhead with std::unordered_map in performance-sensitive code?
How do I merge two containers efficiently with std::map for embedded targets?