The telecom industry is undergoing its most significant transformation since the shift from analog to digital. As operators deploy 5G networks and embrace Network Functions Virtualization (NFV), they’re discovering that Go has become the de facto language for building cloud-native control planes. From Kubernetes to Istio, from etcd to CoreDNS—the infrastructure powering modern telecom runs on Go.

Why Telecom Chose Go

Traditional telecom stacks were built with C/C++ for performance and Java for enterprise services. So why are operators and vendors now standardizing on Go for control plane development?

Concurrency Built for Network Operations

Telecom control planes manage millions of concurrent connections, sessions, and state transitions. Go’s goroutines and channels map naturally to these requirements:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
package main

import (
    "context"
    "log"
    "sync"
)

// Session is the per-subscriber connection state tracked by the manager.
type Session struct {
    ID        string // unique session identifier (map key in SessionManager.sessions)
    IMSI      string // subscriber identity the session belongs to
    State     SessionState
    QoSPolicy QoSPolicy // QoS policy applied to this session — semantics defined elsewhere
}

// SessionManager fans session events out to a pool of worker goroutines
// started by NewSessionManager.
type SessionManager struct {
    sessions sync.Map          // session ID (string) -> *Session
    events   chan SessionEvent // buffered queue drained by processEvents workers
}

// NewSessionManager builds a SessionManager and starts a fixed pool of
// worker goroutines that drain the event queue until ctx is cancelled.
func NewSessionManager(ctx context.Context) *SessionManager {
    const workers = 100

    mgr := &SessionManager{
        events: make(chan SessionEvent, 10000),
    }

    // Each worker exits when ctx is done (see processEvents).
    for w := 0; w < workers; w++ {
        go mgr.processEvents(ctx)
    }

    return mgr
}

// processEvents is a worker loop: it handles queued session events one at
// a time until the supplied context is cancelled.
func (sm *SessionManager) processEvents(ctx context.Context) {
    for {
        select {
        case <-ctx.Done():
            return
        case ev := <-sm.events:
            sm.handleEvent(ev)
        }
    }
}

// handleEvent dispatches a single session event on its concrete type.
// Event types without a case are silently ignored.
func (sm *SessionManager) handleEvent(event SessionEvent) {
    switch ev := event.(type) {
    case *SessionCreateEvent:
        session := &Session{
            ID:    ev.SessionID,
            IMSI:  ev.IMSI,
            State: StateActive,
        }
        sm.sessions.Store(ev.SessionID, session)
        log.Printf("Session %s created for IMSI %s", ev.SessionID, ev.IMSI)
    case *SessionReleaseEvent:
        sm.sessions.Delete(ev.SessionID)
        log.Printf("Session %s released", ev.SessionID)
    }
}

Each goroutine uses only ~2KB of stack space, allowing millions of concurrent operations without the thread overhead that would cripple Java or C++ implementations.

Fast Compilation, Fast Deployment

In telecom, downtime costs millions. Go’s compilation speed enables:

  • Rapid CI/CD pipelines for network functions
  • Quick rollbacks when issues arise
  • Efficient development cycles for 5G feature velocity

A typical control plane service compiles in seconds, not minutes.

Static Binaries for CNF Deployment

Container Network Functions (CNFs) benefit from Go’s static binary output:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
# Multi-stage build for minimal CNF image
FROM golang:1.22 AS builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o /smf-controller ./cmd/smf

FROM scratch
COPY --from=builder /smf-controller /smf-controller
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
ENTRYPOINT ["/smf-controller"]

The result: container images under 20MB that start in milliseconds—critical for telecom’s scaling requirements.

Building a 5G Control Plane with Go

Let’s explore how Go enables the key components of a cloud-native 5G core.

Session Management Function (SMF)

The SMF manages PDU sessions and QoS. Here’s a pattern for handling high-throughput session operations:

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
package smf

import (
    "context"
    "sync"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

var (
    // activeSessions counts PDU sessions currently held by this SMF
    // instance; incremented in CreateSession.
    //
    // NOTE(review): these collectors are constructed with prometheus.New*
    // rather than promauto, and no MustRegister call is visible here —
    // confirm they are registered elsewhere or they will never be scraped.
    activeSessions = prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "smf_active_sessions",
        Help: "Number of active PDU sessions",
    })
    
    // sessionLatency records per-operation latency labelled by operation
    // name (e.g. "create"); buckets span 1ms–100ms.
    sessionLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name:    "smf_session_operation_seconds",
        Help:    "Session operation latency",
        Buckets: []float64{.001, .005, .01, .025, .05, .1},
    }, []string{"operation"})
)

// PDUSession is one subscriber data session managed by the SMF. It embeds
// a sync.RWMutex, so values must never be copied after first use — always
// pass *PDUSession.
type PDUSession struct {
    ID            string
    SUPI          string // subscription permanent identifier
    DNN           string // data network name requested for the session
    SNssai        SNssai // network-slice identifier
    QoSFlows      []QoSFlow
    UPFEndpoint   string // endpoint of the UPF selected for this session
    CreatedAt     time.Time
    mu            sync.RWMutex // guards mutable fields; not exercised in the code shown here
}

// SMFController owns the session table and the clients toward the other
// core network functions (UPF, AMF, PCF).
type SMFController struct {
    sessions     sync.Map // session ID (string) -> *PDUSession
    upfClient    UPFClient
    amfClient    AMFClient
    policyClient PCFClient
}

// CreateSession establishes a new PDU session: it fetches the subscriber's
// QoS policy from the PCF, selects a suitable UPF, programs that UPF over
// the N4 interface, and records the session locally. Latency is observed
// under the "create" label of sessionLatency.
//
// NOTE(review): this package uses fmt.Errorf but the import block shown
// above does not import "fmt" — confirm it is imported.
func (s *SMFController) CreateSession(ctx context.Context, req *CreateSessionRequest) (*PDUSession, error) {
    timer := prometheus.NewTimer(sessionLatency.WithLabelValues("create"))
    defer timer.ObserveDuration()

    // Fetch QoS policy from PCF.
    policy, err := s.policyClient.GetPolicy(ctx, req.SUPI, req.DNN)
    if err != nil {
        return nil, fmt.Errorf("policy fetch failed: %w", err)
    }

    // Select UPF based on locality and capacity.
    upf, err := s.selectUPF(ctx, req.SNssai, req.DNN)
    if err != nil {
        return nil, fmt.Errorf("UPF selection failed: %w", err)
    }

    // Establish the N4 session with the UPF.
    // BUG FIX: the original assigned the result to n4Session without ever
    // using it — "declared and not used" does not compile in Go. The
    // result is discarded explicitly here; only the error matters.
    if _, err := s.upfClient.CreateSession(ctx, &N4CreateRequest{
        SEID: generateSEID(),
        PDRs: buildPDRs(policy),
        FARs: buildFARs(upf),
        QERs: buildQERs(policy.QoSFlows),
    }); err != nil {
        return nil, fmt.Errorf("N4 session creation failed: %w", err)
    }

    session := &PDUSession{
        ID:          generateSessionID(),
        SUPI:        req.SUPI,
        DNN:         req.DNN,
        SNssai:      req.SNssai,
        QoSFlows:    policy.QoSFlows,
        UPFEndpoint: upf.Endpoint,
        CreatedAt:   time.Now(),
    }

    s.sessions.Store(session.ID, session)
    activeSessions.Inc()

    return session, nil
}

// selectUPF returns the lowest-latency UPF that supports the requested
// slice and DNN and is running below 80% load — a locality-aware choice
// for edge deployments. ErrNoAvailableUPF is returned when no candidate
// qualifies.
func (s *SMFController) selectUPF(ctx context.Context, snssai SNssai, dnn string) (*UPF, error) {
    candidates, err := s.upfClient.ListUPFs(ctx)
    if err != nil {
        return nil, err
    }

    var (
        best        *UPF
        bestLatency = time.Hour // sentinel: any real candidate beats this
    )
    for _, candidate := range candidates {
        if !candidate.SupportsSlice(snssai) {
            continue
        }
        if !candidate.SupportsDNN(dnn) {
            continue
        }
        if candidate.CurrentLoad >= 0.8 {
            continue // overloaded UPFs are never considered
        }
        if candidate.Latency < bestLatency {
            best = candidate
            bestLatency = candidate.Latency
        }
    }

    if best == nil {
        return nil, ErrNoAvailableUPF
    }
    return best, nil
}

Network Slice Orchestration

Go excels at building orchestrators that manage network slice lifecycle:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
package slice

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// NetworkSlice is a custom resource for 5G network slices.
//
// NOTE(review): metav1 is referenced but absent from the import block
// shown for this package — confirm k8s.io/apimachinery/pkg/apis/meta/v1
// is imported.
type NetworkSlice struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
    
    Spec   NetworkSliceSpec   `json:"spec,omitempty"`
    Status NetworkSliceStatus `json:"status,omitempty"`
}

// NetworkSliceSpec is the desired state of a slice: its identity, QoS,
// capacity, isolation requirement, and the DNNs it serves.
type NetworkSliceSpec struct {
    SNssai      SNssai            `json:"snssai"`
    QoSProfile  QoSProfile        `json:"qosProfile"`
    Capacity    SliceCapacity     `json:"capacity"`
    Isolation   IsolationLevel    `json:"isolation"` // e.g. IsolationDedicated triggers a per-slice SMF (see reconcileNFs)
    DNNs        []string          `json:"dnns"`
}

// NetworkSliceReconciler reconciles NetworkSlice resources, driving the
// SMF and UPF controllers toward each slice's spec.
type NetworkSliceReconciler struct {
    client.Client
    Scheme *runtime.Scheme
    SMF    *SMFController
    UPF    *UPFController
}

// Reconcile drives one NetworkSlice toward its desired state: it ensures
// the slice's network functions exist, applies QoS configuration, then
// refreshes the slice status. The object is re-queued every 30 seconds.
//
// NOTE(review): "time" is used here but absent from the import block
// shown for this package — confirm it is imported.
func (r *NetworkSliceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var slice NetworkSlice
    if err := r.Get(ctx, req.NamespacedName, &slice); err != nil {
        // A deleted resource is not an error — there is nothing to do.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Network functions first, then QoS policies on top of them.
    if err := r.reconcileNFs(ctx, &slice); err != nil {
        return ctrl.Result{}, err
    }
    if err := r.reconcileQoS(ctx, &slice); err != nil {
        return ctrl.Result{}, err
    }

    // Publish the observed state on the status subresource.
    slice.Status.State = SliceStateActive
    slice.Status.ActiveSessions = r.SMF.CountSessionsForSlice(slice.Spec.SNssai)
    if err := r.Status().Update(ctx, &slice); err != nil {
        return ctrl.Result{}, err
    }

    return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}

// reconcileNFs ensures the network functions backing a slice exist: a
// dedicated SMF deployment when the slice requires isolation, and the
// slice's UPF configuration in all cases.
//
// NOTE(review): errors.IsAlreadyExists suggests
// k8s.io/apimachinery/pkg/api/errors, which is not in the import block
// shown — confirm it is imported.
func (r *NetworkSliceReconciler) reconcileNFs(ctx context.Context, slice *NetworkSlice) error {
    if slice.Spec.Isolation == IsolationDedicated {
        // Dedicated slices get their own SMF instance; an AlreadyExists
        // error from a previous reconcile pass is benign.
        deployment := buildSMFDeployment(slice)
        if err := r.Create(ctx, deployment); err != nil && !errors.IsAlreadyExists(err) {
            return fmt.Errorf("failed to create SMF: %w", err)
        }
    }

    cfg := buildUPFConfig(slice)
    if err := r.UPF.ApplySliceConfig(ctx, cfg); err != nil {
        return fmt.Errorf("failed to configure UPF: %w", err)
    }
    return nil
}

PFCP Protocol Implementation

The PFCP protocol connects the SMF to UPF. Go’s standard library makes protocol implementation clean:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
package pfcp

import (
    "context"
    "encoding/binary"
    "log"
    "net"
    "sync"
    "time"
)

const (
    // PFCPPort is the destination UDP port for PFCP, the N4 interface
    // between SMF and UPF (8805 per 3GPP TS 29.244 — confirm against the
    // deployment's spec version).
    PFCPPort = 8805
)

// PFCPHeader is the fixed header of a PFCP message as decoded by
// parsePFCPHeader (not shown here).
type PFCPHeader struct {
    Version       uint8
    MessageType   uint8  // dispatch key into PFCPNode.handlers
    Length        uint16 // message length from the wire header
    SEID          uint64 // session endpoint identifier
    SequenceNum   uint32
}

// PFCPNode is one PFCP endpoint: a UDP socket plus a handler table keyed
// by message type. Serve runs its receive loop.
type PFCPNode struct {
    conn     *net.UDPConn
    handlers map[uint8]MessageHandler // written only in NewPFCPNode, read in handleMessage
    sessions sync.Map
}

// NewPFCPNode binds a UDP socket on addr and returns a PFCP endpoint with
// the standard handlers (heartbeat, association setup, session
// establishment) pre-registered. Call Serve to start processing.
func NewPFCPNode(addr string) (*PFCPNode, error) {
    udpAddr, err := net.ResolveUDPAddr("udp", addr)
    if err != nil {
        return nil, err
    }

    conn, err := net.ListenUDP("udp", udpAddr)
    if err != nil {
        return nil, err
    }

    n := &PFCPNode{conn: conn}
    // The handler table references methods on n, so it is filled in after
    // the node exists.
    n.handlers = map[uint8]MessageHandler{
        MsgTypeHeartbeatRequest:            n.handleHeartbeat,
        MsgTypeAssociationSetupRequest:     n.handleAssociationSetup,
        MsgTypeSessionEstablishmentRequest: n.handleSessionEstablishment,
    }

    return n, nil
}

// Serve reads PFCP datagrams until ctx is cancelled, dispatching each one
// on its own goroutine so a slow handler cannot stall the read loop.
// It returns ctx.Err() on cancellation.
func (n *PFCPNode) Serve(ctx context.Context) error {
    buf := make([]byte, 65535)

    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(0):
            // fall through to the read below
        }

        // Bound the blocking read so cancellation is noticed promptly;
        // without a deadline, ReadFromUDP could block past ctx.Done()
        // indefinitely on a quiet socket.
        _ = n.conn.SetReadDeadline(time.Now().Add(time.Second))
        size, remoteAddr, err := n.conn.ReadFromUDP(buf)
        if err != nil {
            // Deadline expiry and transient errors: loop and retry.
            continue
        }

        // BUG FIX: buf is reused by the next ReadFromUDP, so handing
        // buf[:size] straight to a goroutine raced with the next read and
        // could corrupt in-flight messages. Copy the datagram first.
        msg := make([]byte, size)
        copy(msg, buf[:size])
        go n.handleMessage(msg, remoteAddr)
    }
}

// handleMessage parses one PFCP datagram, invokes the handler registered
// for its message type, and writes any response back to addr. Errors are
// logged, never returned — the receive loop must not be disturbed.
func (n *PFCPNode) handleMessage(data []byte, addr *net.UDPAddr) {
    // Guard against truncated datagrams before parsing. A PFCP header is
    // at least 8 bytes (16 with the SEID flag set) — TODO confirm this
    // bound against parsePFCPHeader, which is not shown here.
    if len(data) < 8 {
        log.Printf("Dropping short PFCP message (%d bytes)", len(data))
        return
    }

    header := parsePFCPHeader(data)

    handler, ok := n.handlers[header.MessageType]
    if !ok {
        log.Printf("Unknown message type: %d", header.MessageType)
        return
    }

    response, err := handler(data, addr)
    if err != nil {
        log.Printf("Handler error: %v", err)
        return
    }

    if response != nil {
        // The original discarded the write error; log it so lost
        // responses are at least observable.
        if _, err := n.conn.WriteToUDP(response, addr); err != nil {
            log.Printf("Response write to %s failed: %v", addr, err)
        }
    }
}

Kubernetes-Native Telecom Operations

The real power of Go in telecom comes from native Kubernetes integration.

Custom Resource Definitions for Network Functions

Define your network functions as Kubernetes-native resources:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
package v1alpha1

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Sessions",type=integer,JSONPath=`.status.activeSessions`

// AMF is the custom resource describing one Access and Mobility
// Management Function deployment.
type AMF struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
    
    Spec   AMFSpec   `json:"spec,omitempty"`
    Status AMFStatus `json:"status,omitempty"`
}

// AMFSpec is the desired AMF configuration: scale, PLMN identity, served
// tracking areas and slices, the N2 endpoint, and compute resources.
type AMFSpec struct {
    Replicas    int32           `json:"replicas"`
    PLMN        PLMNIdentity    `json:"plmn"`
    TACs        []string        `json:"tacs"`
    Slices      []SNssai        `json:"slices"`
    N2Endpoint  string          `json:"n2Endpoint"`
    Resources   ResourceSpec    `json:"resources"`
}

// AMFStatus is the observed state, surfaced via the status subresource
// and the printcolumns declared above.
type AMFStatus struct {
    State            string       `json:"state"`
    ActiveSessions   int32        `json:"activeSessions"`
    ConnectedGNBs    int32        `json:"connectedGnbs"`
    LastHeartbeat    metav1.Time  `json:"lastHeartbeat"`
    Conditions       []Condition  `json:"conditions,omitempty"`
}

Operator Pattern for Lifecycle Management

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
package controllers

import (
    "context"

    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"
)

// AMFReconciler reconciles AMF custom resources into their backing
// Kubernetes objects (ConfigMap, Deployment, Services).
//
// NOTE(review): client and runtime are referenced but absent from the
// import block shown for this package — confirm they are imported.
type AMFReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

// Reconcile converges one AMF resource: it applies the ConfigMap,
// Deployment, N2 Service and SCTP Service derived from the spec, then
// refreshes status from the running pods. The object is re-queued every
// 10 seconds for continuous drift correction.
//
// NOTE(review): time, client and the telcov1alpha1 API package are
// referenced but not in the import block shown — confirm imports.
func (r *AMFReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var amf telcov1alpha1.AMF
    if err := r.Get(ctx, req.NamespacedName, &amf); err != nil {
        // A deleted AMF is not an error — nothing left to reconcile.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Build and apply each child object in turn, stopping at the first
    // failure so the next pass retries from a consistent point.
    if err := r.createOrUpdate(ctx, r.buildConfigMap(&amf)); err != nil {
        return ctrl.Result{}, err
    }
    if err := r.createOrUpdate(ctx, r.buildDeployment(&amf)); err != nil {
        return ctrl.Result{}, err
    }
    if err := r.createOrUpdate(ctx, r.buildN2Service(&amf)); err != nil {
        return ctrl.Result{}, err
    }
    if err := r.createOrUpdate(ctx, r.buildSCTPService(&amf)); err != nil {
        return ctrl.Result{}, err
    }

    // Reflect the state of the running pods on the status subresource.
    if err := r.updateStatus(ctx, &amf); err != nil {
        return ctrl.Result{}, err
    }

    return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
}

// buildDeployment renders the Deployment backing one AMF resource,
// exposing the N2 (SCTP) and SBI (HTTP) ports.
func (r *AMFReconciler) buildDeployment(amf *telcov1alpha1.AMF) *appsv1.Deployment {
    // BUG FIX: the original pod template carried no labels, but a
    // Deployment whose spec.selector does not match its
    // template.metadata.labels is rejected by the API server. Share one
    // label map between the selector and the template.
    labels := map[string]string{
        "app":        "amf",
        "nf.5g/name": amf.Name,
    }

    return &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:      amf.Name,
            Namespace: amf.Namespace,
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: &amf.Spec.Replicas,
            Selector: &metav1.LabelSelector{
                MatchLabels: labels,
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: labels,
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{{
                        Name:  "amf",
                        Image: "registry.example.com/5g-core/amf:v1.0.0",
                        Ports: []corev1.ContainerPort{
                            {Name: "n2", ContainerPort: 38412, Protocol: corev1.ProtocolSCTP},
                            {Name: "sbi", ContainerPort: 8080, Protocol: corev1.ProtocolTCP},
                        },
                        Resources: buildResourceRequirements(amf.Spec.Resources),
                    }},
                },
            },
        },
    }
}

Performance Patterns for Telecom

Zero-Allocation Hot Paths

In control planes handling millions of messages per second, allocation matters:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
package parser

import (
    "sync"
)

// Pre-allocate message buffers
// messagePool recycles NGAP message structs (with pre-sized IE slices) so
// the parse hot path avoids a fresh allocation per message. Callers must
// return messages via ReleaseMessage when done.
var messagePool = sync.Pool{
    New: func() interface{} {
        return &NGAPMessage{
            IEs: make([]InformationElement, 0, 16), // typical IE count; grows if exceeded
        }
    },
}

// ParseNGAP decodes an NGAP PDU into a pooled message. On success the
// caller owns the message and must hand it back with ReleaseMessage; on
// failure the message is returned to the pool immediately.
func ParseNGAP(data []byte) (*NGAPMessage, error) {
    msg := messagePool.Get().(*NGAPMessage)
    // A pooled message may still carry its previous parse — clear it.
    msg.Reset()

    err := msg.Unmarshal(data)
    if err != nil {
        messagePool.Put(msg)
        return nil, err
    }
    return msg, nil
}

// ReleaseMessage returns a parsed message to the pool once the caller is
// done with it. A nil message is a no-op.
func ReleaseMessage(msg *NGAPMessage) {
    if msg == nil {
        return
    }
    // Clear the message before pooling so it does not pin the parsed
    // payload (IE contents) in memory until its eventual reuse.
    msg.Reset()
    messagePool.Put(msg)
}

Sharded State for Lock Contention

Distribute state across shards to reduce lock contention:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
package state

import (
    "hash/fnv"
    "sync"
)

// numShards is the fixed shard count; 256 keeps the modulo cheap and the
// per-shard lock contention low.
const numShards = 256

// ShardedSessionStore spreads sessions across independently locked shards
// so concurrent access to different keys rarely contends.
type ShardedSessionStore struct {
    shards [numShards]*sessionShard
}

// sessionShard is one lock-protected partition of the session table.
type sessionShard struct {
    mu       sync.RWMutex
    sessions map[string]*Session // session ID -> session; guarded by mu
}

// NewShardedSessionStore allocates every shard up front so callers never
// observe a nil shard map.
func NewShardedSessionStore() *ShardedSessionStore {
    s := &ShardedSessionStore{}
    for i := range s.shards {
        s.shards[i] = &sessionShard{sessions: map[string]*Session{}}
    }
    return s
}

// getShard maps a session ID to its owning shard via a 32-bit FNV-1a hash
// of the key.
//
// NOTE(review): fnv.New32a and the []byte(key) conversion each allocate on
// every call; on a genuinely hot path, inlining the FNV-1a arithmetic over
// the string would make this zero-allocation.
func (s *ShardedSessionStore) getShard(key string) *sessionShard {
    hasher := fnv.New32a()
    hasher.Write([]byte(key))
    idx := hasher.Sum32() % numShards
    return s.shards[idx]
}

// Get looks up a session by ID under the owning shard's read lock.
func (s *ShardedSessionStore) Get(sessionID string) (*Session, bool) {
    shard := s.getShard(sessionID)
    shard.mu.RLock()
    defer shard.mu.RUnlock()
    sess, found := shard.sessions[sessionID]
    return sess, found
}

// Set inserts or replaces a session under the owning shard's write lock.
func (s *ShardedSessionStore) Set(sessionID string, session *Session) {
    shard := s.getShard(sessionID)
    shard.mu.Lock()
    defer shard.mu.Unlock()
    shard.sessions[sessionID] = session
}

Service Mesh Integration

Telecom control planes benefit from service mesh capabilities:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
# Route SMF traffic to slice-specific subsets based on the x-slice-id
# request header set by callers.
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: smf-routing
spec:
  hosts:
  - smf.5g-core.svc.cluster.local
  http:
  - match:
    - headers:
        x-slice-id:
          exact: "1-000001"   # eMBB slice
    route:
    - destination:
        host: smf.5g-core.svc.cluster.local
        subset: slice-embb
  - match:
    - headers:
        x-slice-id:
          exact: "1-000002"   # URLLC slice
    route:
    - destination:
        host: smf.5g-core.svc.cluster.local
        subset: slice-urllc
---
# Subsets select SMF pods by their "slice" label; the URLLC subset caps
# connection fan-out to protect its latency budget.
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: smf-slices
spec:
  host: smf.5g-core.svc.cluster.local
  subsets:
  - name: slice-embb
    labels:
      slice: embb
  - name: slice-urllc
    labels:
      slice: urllc
    trafficPolicy:
      connectionPool:
        tcp:
          maxConnections: 1000
        http:
          http2MaxRequests: 1000
          maxRequestsPerConnection: 10

Observability for Telecom SLAs

Telecom operators need deep visibility into control plane operations:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
package metrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    // Session metrics.

    // sessionEstablishmentLatency tracks PDU session setup time per DNN,
    // slice and outcome; buckets span 10ms–2.5s.
    sessionEstablishmentLatency = promauto.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "smf_session_establishment_duration_seconds",
            Help:    "PDU session establishment latency",
            Buckets: []float64{.01, .025, .05, .1, .25, .5, 1, 2.5},
        },
        []string{"dnn", "slice", "result"},
    )
    
    // sessionEstablishmentTotal counts attempts with the same label set,
    // enabling success-rate calculations alongside the histogram.
    sessionEstablishmentTotal = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name: "smf_session_establishment_total",
            Help: "Total PDU session establishment attempts",
        },
        []string{"dnn", "slice", "result"},
    )
    
    // N4 interface metrics: PFCP round-trip time per message type and
    // target UPF; buckets span 1ms–100ms.
    n4MessageLatency = promauto.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "smf_n4_message_duration_seconds",
            Help:    "PFCP message round-trip time",
            Buckets: []float64{.001, .005, .01, .025, .05, .1},
        },
        []string{"message_type", "upf"},
    )
    
    // 3GPP-defined KPIs. The Help text maps this gauge to the
    // 5GS.NbrRegisteredSub measurement.
    registeredSubscribers = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "amf_registered_subscribers",
            Help: "Number of registered subscribers (5GS.NbrRegisteredSub)",
        },
        []string{"plmn", "slice"},
    )
)

Conclusion

Go has become the lingua franca of cloud-native telecom for good reason:

  • Goroutines handle the massive concurrency of telecom control planes
  • Fast compilation enables rapid iteration and deployment
  • Static binaries create minimal container images for CNFs
  • Kubernetes integration is native through controller-runtime
  • Strong standard library simplifies protocol implementation

The telecom industry’s shift to cloud-native architectures isn’t just about technology—it’s about agility, cost efficiency, and the ability to deploy new services in days rather than months. Go enables all of this while maintaining the performance characteristics telecom demands.

At Sajima Solutions, we help telecom operators and vendors build cloud-native control planes that meet carrier-grade requirements. Contact us to discuss your 5G core modernization journey.