mirror of https://github.com/MetaCubeX/mihomo.git synced 2025-07-18 17:08:06 +08:00

chore: allow embedded xsync.Map to be lazily initialized

commit aa555ced5f (parent 349b773b40)
Author: wwqgtxx
Date: 2025-07-15 17:33:36 +08:00
9 changed files with 42 additions and 43 deletions
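
What changes in practice: fields that previously held a *xsync.Map allocated with xsync.NewMap() now embed xsync.Map by value, and the map initializes itself on first use through a sync.Once. Below is a minimal, self-contained sketch of that pattern; the names are illustrative only and not taken from the repository.

```go
package main

import (
	"fmt"
	"sync"
)

// lazyMap sketches the pattern this commit adopts for xsync.Map: the zero
// value is usable, and real initialization is deferred to the first method
// call via sync.Once (the diff below does the same with an initOnce field).
type lazyMap[K comparable, V any] struct {
	initOnce sync.Once
	mu       sync.Mutex
	m        map[K]V
}

func (l *lazyMap[K, V]) init() { l.m = make(map[K]V) }

func (l *lazyMap[K, V]) Store(key K, value V) {
	l.initOnce.Do(l.init)
	l.mu.Lock()
	defer l.mu.Unlock()
	l.m[key] = value
}

func (l *lazyMap[K, V]) Load(key K) (V, bool) {
	l.initOnce.Do(l.init)
	l.mu.Lock()
	defer l.mu.Unlock()
	v, ok := l.m[key]
	return v, ok
}

// Detector mirrors the simplification seen in the diff: the map is embedded
// by value, so the constructor no longer has to allocate it.
type Detector struct {
	connMap lazyMap[string, struct{}]
}

func main() {
	d := &Detector{} // no NewMap-style constructor call needed
	d.connMap.Store("10.0.0.1:443", struct{}{})
	_, ok := d.connMap.Load("10.0.0.1:443")
	fmt.Println(ok) // true
}
```

The real xsync.Map gets the same behavior: the diff below adds an initOnce field and calls m.initOnce.Do(m.init) at the top of each public method.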


@@ -35,7 +35,7 @@ type Proxy struct {
C.ProxyAdapter
alive atomic.Bool
history *queue.Queue[C.DelayHistory]
extra *xsync.Map[string, *internalProxyState]
extra xsync.Map[string, *internalProxyState]
}
// Adapter implements C.Proxy
@@ -293,7 +293,7 @@ func NewProxy(adapter C.ProxyAdapter) *Proxy {
ProxyAdapter: adapter,
history: queue.New[C.DelayHistory](defaultHistoriesNum),
alive: atomic.NewBool(true),
extra: xsync.NewMap[string, *internalProxyState]()}
}
}
func urlToMetadata(rawURL string) (addr C.Metadata, err error) {


@@ -3,7 +3,9 @@ package xsync
// copy and modified from https://github.com/puzpuzpuz/xsync/blob/v4.1.0/map.go
// which is licensed under Apache v2.
//
// parallel Map resize has been removed to decrease memory usage
// mihomo modified:
// 1. parallel Map resize has been removed to decrease memory usage.
// 2. the zero Map is ready for use.
import (
"fmt"
@@ -95,6 +97,7 @@ const (
// and C++'s absl::flat_hash_map (meta memory and SWAR-based
// lookups).
type Map[K comparable, V any] struct {
initOnce sync.Once
totalGrowths atomic.Int64
totalShrinks atomic.Int64
resizing atomic.Bool // resize in progress flag
@@ -172,28 +175,30 @@ func WithGrowOnly() func(*MapConfig) {
// NewMap creates a new Map instance configured with the given
// options.
func NewMap[K comparable, V any](options ...func(*MapConfig)) *Map[K, V] {
c := &MapConfig{
sizeHint: defaultMinMapTableLen * entriesPerMapBucket,
}
c := &MapConfig{}
for _, o := range options {
o(c)
}
m := &Map[K, V]{}
m.resizeCond = *sync.NewCond(&m.resizeMu)
var table *mapTable[K, V]
if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
table = newMapTable[K, V](defaultMinMapTableLen)
} else {
if c.sizeHint > defaultMinMapTableLen*entriesPerMapBucket {
tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor))
table = newMapTable[K, V](int(tableLen))
m.minTableLen = int(tableLen)
}
m.minTableLen = len(table.buckets)
m.growOnly = c.growOnly
m.table.Store(table)
return m
}
func (m *Map[K, V]) init() {
if m.minTableLen == 0 {
m.minTableLen = defaultMinMapTableLen
}
m.resizeCond = *sync.NewCond(&m.resizeMu)
table := newMapTable[K, V](m.minTableLen)
m.minTableLen = len(table.buckets)
m.table.Store(table)
}
func newMapTable[K comparable, V any](minTableLen int) *mapTable[K, V] {
buckets := make([]bucketPadded[K, V], minTableLen)
for i := range buckets {
@@ -233,6 +238,7 @@ func ToPlainMap[K comparable, V any](m *Map[K, V]) map[K]V {
// of type V if no value is present.
// The ok result indicates whether value was found in the map.
func (m *Map[K, V]) Load(key K) (value V, ok bool) {
m.initOnce.Do(m.init)
table := m.table.Load()
hash := maphash.Comparable(table.seed, key)
h1 := h1(hash)
@@ -389,6 +395,7 @@ func (m *Map[K, V]) doCompute(
loadOp loadOp,
computeOnly bool,
) (V, bool) {
m.initOnce.Do(m.init)
for {
compute_attempt:
var (
@@ -672,6 +679,7 @@ func copyBucket[K comparable, V any](
// modification rule apply, i.e. the changes may be not reflected
// in the subsequently iterated entries.
func (m *Map[K, V]) Range(f func(key K, value V) bool) {
m.initOnce.Do(m.init)
// Pre-allocate array big enough to fit entries for most hash tables.
bentries := make([]*entry[K, V], 0, 16*entriesPerMapBucket)
table := m.table.Load()
@@ -709,11 +717,13 @@ func (m *Map[K, V]) Range(f func(key K, value V) bool) {
// Clear deletes all keys and values currently stored in the map.
func (m *Map[K, V]) Clear() {
m.initOnce.Do(m.init)
m.resize(m.table.Load(), mapClearHint)
}
// Size returns current size of the map.
func (m *Map[K, V]) Size() int {
m.initOnce.Do(m.init)
return int(m.table.Load().sumSize())
}
@@ -828,6 +838,7 @@ func (s *MapStats) ToString() string {
// methods, this one is thread-safe. Yet it's an O(N) operation,
// so it should be used only for diagnostics or debugging purposes.
func (m *Map[K, V]) Stats() MapStats {
m.initOnce.Do(m.init)
stats := MapStats{
TotalGrowths: m.totalGrowths.Load(),
TotalShrinks: m.totalShrinks.Load(),

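With the Map zero value now usable, the remaining files in this commit simply delete their xsync.NewMap calls and switch the fields to value types. A hedged sketch of what a call site looks like afterwards; the import path is an assumption (file paths are not shown in this view), and Load and LoadOrStoreFn are the methods that appear in the hunks of this commit.

```go
package example

import "github.com/metacubex/mihomo/common/xsync" // assumed import path; not shown in this diff

type registry struct {
	sessions xsync.Map[string, int] // embedded by value; the zero Map is ready for use
}

func (r *registry) lookup(id string) (int, bool) {
	// The first method call runs the Map's internal initOnce.Do(m.init).
	return r.sessions.Load(id)
}

func (r *registry) getOrCreate(id string) (int, bool) {
	// The second result reports whether the value was already present.
	return r.sessions.LoadOrStoreFn(id, func() int { return len(id) })
}
```
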

@@ -25,22 +25,19 @@ func init() {
var ErrReject = errors.New("reject loopback connection")
type Detector struct {
connMap *xsync.Map[netip.AddrPort, struct{}]
packetConnMap *xsync.Map[uint16, struct{}]
connMap xsync.Map[netip.AddrPort, struct{}]
packetConnMap xsync.Map[uint16, struct{}]
}
func NewDetector() *Detector {
if disableLoopBackDetector {
return nil
}
return &Detector{
connMap: xsync.NewMap[netip.AddrPort, struct{}](),
packetConnMap: xsync.NewMap[uint16, struct{}](),
}
return &Detector{}
}
func (l *Detector) NewConn(conn C.Conn) C.Conn {
if l == nil || l.connMap == nil {
if l == nil {
return conn
}
metadata := C.Metadata{}
@@ -58,7 +55,7 @@ func (l *Detector) NewConn(conn C.Conn) C.Conn {
}
func (l *Detector) NewPacketConn(conn C.PacketConn) C.PacketConn {
if l == nil || l.packetConnMap == nil {
if l == nil {
return conn
}
metadata := C.Metadata{}
@@ -77,7 +74,7 @@ func (l *Detector) NewPacketConn(conn C.PacketConn) C.PacketConn {
}
func (l *Detector) CheckConn(metadata *C.Metadata) error {
if l == nil || l.connMap == nil {
if l == nil {
return nil
}
connAddr := metadata.SourceAddrPort()
@@ -91,7 +88,7 @@ func (l *Detector) CheckConn(metadata *C.Metadata) error {
}
func (l *Detector) CheckPacketConn(metadata *C.Metadata) error {
if l == nil || l.packetConnMap == nil {
if l == nil {
return nil
}
connAddr := metadata.SourceAddrPort()


@@ -9,21 +9,19 @@ import (
)
type Table struct {
mapping *xsync.Map[string, *entry]
mapping xsync.Map[string, *entry]
}
type entry struct {
PacketSender C.PacketSender
LocalUDPConnMap *xsync.Map[string, *net.UDPConn]
LocalLockMap *xsync.Map[string, *sync.Cond]
LocalUDPConnMap xsync.Map[string, *net.UDPConn]
LocalLockMap xsync.Map[string, *sync.Cond]
}
func (t *Table) GetOrCreate(key string, maker func() C.PacketSender) (C.PacketSender, bool) {
item, loaded := t.mapping.LoadOrStoreFn(key, func() *entry {
return &entry{
PacketSender: maker(),
LocalUDPConnMap: xsync.NewMap[string, *net.UDPConn](),
LocalLockMap: xsync.NewMap[string, *sync.Cond](),
PacketSender: maker(),
}
})
return item.PacketSender, loaded
@@ -97,7 +95,5 @@ func makeLock() *sync.Cond {
// New return *Cache
func New() *Table {
return &Table{
mapping: xsync.NewMap[string, *entry](),
}
return &Table{}
}


@@ -48,7 +48,7 @@ type clientImpl struct {
openStreams atomic.Int64
closed atomic.Bool
udpInputMap *xsync.Map[uint32, net.Conn]
udpInputMap xsync.Map[uint32, net.Conn]
// only ready for PoolClient
dialerRef C.Dialer
@@ -422,7 +422,6 @@ func NewClient(clientOption *ClientOption, udp bool, dialerRef C.Dialer) *Client
ClientOption: clientOption,
udp: udp,
dialerRef: dialerRef,
udpInputMap: xsync.NewMap[uint32, net.Conn](),
}
c := &Client{ci}
runtime.SetFinalizer(c, closeClient)


@@ -34,7 +34,6 @@ func NewServerHandler(option *ServerOption, quicConn *quic.Conn, uuid uuid.UUID)
quicConn: quicConn,
uuid: uuid,
authCh: make(chan struct{}),
udpInputMap: xsync.NewMap[uint32, *atomic.Bool](),
}
}
@@ -47,7 +46,7 @@ type serverHandler struct {
authOk atomic.Bool
authOnce sync.Once
udpInputMap *xsync.Map[uint32, *atomic.Bool]
udpInputMap xsync.Map[uint32, *atomic.Bool]
}
func (s *serverHandler) AuthOk() bool {


@@ -47,7 +47,7 @@ type clientImpl struct {
openStreams atomic.Int64
closed atomic.Bool
udpInputMap *xsync.Map[uint16, net.Conn]
udpInputMap xsync.Map[uint16, net.Conn]
// only ready for PoolClient
dialerRef C.Dialer
@@ -406,7 +406,6 @@ func NewClient(clientOption *ClientOption, udp bool, dialerRef C.Dialer) *Client
ClientOption: clientOption,
udp: udp,
dialerRef: dialerRef,
udpInputMap: xsync.NewMap[uint16, net.Conn](),
}
c := &Client{ci}
runtime.SetFinalizer(c, closeClient)


@@ -33,7 +33,6 @@ func NewServerHandler(option *ServerOption, quicConn *quic.Conn, uuid uuid.UUID)
quicConn: quicConn,
uuid: uuid,
authCh: make(chan struct{}),
udpInputMap: xsync.NewMap[uint16, *serverUDPInput](),
}
}
@@ -47,7 +46,7 @@ type serverHandler struct {
authUUID atomic.TypedValue[string]
authOnce sync.Once
udpInputMap *xsync.Map[uint16, *serverUDPInput]
udpInputMap xsync.Map[uint16, *serverUDPInput]
}
func (s *serverHandler) AuthOk() bool {


@@ -14,7 +14,6 @@ var DefaultManager *Manager
func init() {
DefaultManager = &Manager{
connections: xsync.NewMap[string, Tracker](),
uploadTemp: atomic.NewInt64(0),
downloadTemp: atomic.NewInt64(0),
uploadBlip: atomic.NewInt64(0),
@@ -28,7 +27,7 @@ func init() {
}
type Manager struct {
connections *xsync.Map[string, Tracker]
connections xsync.Map[string, Tracker]
uploadTemp atomic.Int64
downloadTemp atomic.Int64
uploadBlip atomic.Int64