瀏覽代碼

client/control: refactor code

fatedier 6 年之前
父節點
當前提交
b33ea9274c
共有 6 個文件被更改,包括 323 次插入和 313 次刪除
  1. 1 1
      client/admin_api.go
  2. 33 186
      client/control.go
  3. 1 1
      client/proxy.go
  4. 116 116
      client/proxy_manager.go
  5. 171 8
      client/service.go
  6. 1 1
      conf/frpc_full.ini

+ 1 - 1
client/admin_api.go

@@ -85,7 +85,7 @@ func (svr *Service) apiReload(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	err = svr.ctl.reloadConf(pxyCfgs, visitorCfgs)
+	err = svr.ctl.ReloadConf(pxyCfgs, visitorCfgs)
 	if err != nil {
 		res.Code = 4
 		res.Msg = err.Error()

+ 33 - 186
client/control.go

@@ -17,8 +17,6 @@ package client
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
-	"runtime"
 	"runtime/debug"
 	"sync"
 	"time"
@@ -28,24 +26,15 @@ import (
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
 	frpNet "github.com/fatedier/frp/utils/net"
-	"github.com/fatedier/frp/utils/util"
-	"github.com/fatedier/frp/utils/version"
 
 	"github.com/fatedier/golib/control/shutdown"
 	"github.com/fatedier/golib/crypto"
 	fmux "github.com/hashicorp/yamux"
 )
 
-const (
-	connReadTimeout time.Duration = 10 * time.Second
-)
-
 type Control struct {
-	// frpc service
-	svr *Service
-
-	// login message to server, only used
-	loginMsg *msg.Login
+	// uniq id got from frps, attach it in loginMsg
+	runId string
 
 	// manage all proxies
 	pm *ProxyManager
@@ -65,14 +54,10 @@ type Control struct {
 	// read from this channel to get the next message sent by server
 	readCh chan (msg.Message)
 
-	// run id got from server
-	runId string
-
-	// if we call close() in control, do not reconnect to server
-	exit bool
-
 	// goroutines can block by reading from this channel, it will be closed only in reader() when control connection is closed
-	closedCh chan int
+	closedCh chan struct{}
+
+	closedDoneCh chan struct{}
 
 	// last time got the Pong message
 	lastPong time.Time
@@ -86,50 +71,28 @@ type Control struct {
 	log.Logger
 }
 
-func NewControl(svr *Service, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
-	loginMsg := &msg.Login{
-		Arch:      runtime.GOARCH,
-		Os:        runtime.GOOS,
-		PoolCount: g.GlbClientCfg.PoolCount,
-		User:      g.GlbClientCfg.User,
-		Version:   version.Full(),
-	}
+func NewControl(runId string, conn frpNet.Conn, session *fmux.Session, pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) *Control {
 	ctl := &Control{
-		svr:                svr,
-		loginMsg:           loginMsg,
+		runId:              runId,
+		conn:               conn,
+		session:            session,
 		sendCh:             make(chan msg.Message, 100),
 		readCh:             make(chan msg.Message, 100),
-		closedCh:           make(chan int),
+		closedCh:           make(chan struct{}),
+		closedDoneCh:       make(chan struct{}),
 		readerShutdown:     shutdown.New(),
 		writerShutdown:     shutdown.New(),
 		msgHandlerShutdown: shutdown.New(),
 		Logger:             log.NewPrefixLogger(""),
 	}
-	ctl.pm = NewProxyManager(ctl, ctl.sendCh, "")
+	ctl.pm = NewProxyManager(ctl.sendCh, "")
 	ctl.pm.Reload(pxyCfgs, false)
 	ctl.vm = NewVisitorManager(ctl)
 	ctl.vm.Reload(visitorCfgs)
 	return ctl
 }
 
-func (ctl *Control) Run() (err error) {
-	for {
-		err = ctl.login()
-		if err != nil {
-			ctl.Warn("login to server failed: %v", err)
-
-			// if login_fail_exit is true, just exit this program
-			// otherwise sleep a while and continues relogin to server
-			if g.GlbClientCfg.LoginFailExit {
-				return
-			} else {
-				time.Sleep(10 * time.Second)
-			}
-		} else {
-			break
-		}
-	}
-
+func (ctl *Control) Run() {
 	go ctl.worker()
 
 	// start all local visitors and send NewProxy message for all configured proxies
@@ -137,7 +100,7 @@ func (ctl *Control) Run() (err error) {
 	ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew})
 
 	go ctl.vm.Run()
-	return nil
+	return
 }
 
 func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
@@ -179,80 +142,13 @@ func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) {
 }
 
 func (ctl *Control) Close() error {
-	ctl.mu.Lock()
-	defer ctl.mu.Unlock()
-	ctl.exit = true
 	ctl.pm.CloseProxies()
 	return nil
 }
 
-// login send a login message to server and wait for a loginResp message.
-func (ctl *Control) login() (err error) {
-	if ctl.conn != nil {
-		ctl.conn.Close()
-	}
-	if ctl.session != nil {
-		ctl.session.Close()
-	}
-
-	conn, err := frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
-		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
-	if err != nil {
-		return err
-	}
-
-	defer func() {
-		if err != nil {
-			conn.Close()
-		}
-	}()
-
-	if g.GlbClientCfg.TcpMux {
-		fmuxCfg := fmux.DefaultConfig()
-		fmuxCfg.LogOutput = ioutil.Discard
-		session, errRet := fmux.Client(conn, fmuxCfg)
-		if errRet != nil {
-			return errRet
-		}
-		stream, errRet := session.OpenStream()
-		if errRet != nil {
-			session.Close()
-			return errRet
-		}
-		conn = frpNet.WrapConn(stream)
-		ctl.session = session
-	}
-
-	now := time.Now().Unix()
-	ctl.loginMsg.PrivilegeKey = util.GetAuthKey(g.GlbClientCfg.Token, now)
-	ctl.loginMsg.Timestamp = now
-	ctl.loginMsg.RunId = ctl.runId
-
-	if err = msg.WriteMsg(conn, ctl.loginMsg); err != nil {
-		return err
-	}
-
-	var loginRespMsg msg.LoginResp
-	conn.SetReadDeadline(time.Now().Add(connReadTimeout))
-	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
-		return err
-	}
-	conn.SetReadDeadline(time.Time{})
-
-	if loginRespMsg.Error != "" {
-		err = fmt.Errorf("%s", loginRespMsg.Error)
-		ctl.Error("%s", loginRespMsg.Error)
-		return err
-	}
-
-	ctl.conn = conn
-	// update runId got from server
-	ctl.runId = loginRespMsg.RunId
-	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
-	ctl.ClearLogPrefix()
-	ctl.AddLogPrefix(loginRespMsg.RunId)
-	ctl.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
-	return nil
+// ClosedDoneCh returns a channel which will be closed after all resources are released
+func (ctl *Control) ClosedDoneCh() <-chan struct{} {
+	return ctl.closedDoneCh
 }
 
 // connectServer return a new connection to frps
@@ -373,87 +269,38 @@ func (ctl *Control) msgHandler() {
 	}
 }
 
-// controler keep watching closedCh, start a new connection if previous control connection is closed.
-// If controler is notified by closedCh, reader and writer and handler will exit, then recall these functions.
 // If controller is notified by closedCh, reader, writer and handler will exit
 func (ctl *Control) worker() {
 	go ctl.msgHandler()
 	go ctl.reader()
 	go ctl.writer()
 
-	var err error
-	maxDelayTime := 20 * time.Second
-	delayTime := time.Second
-
 	checkInterval := 60 * time.Second
 	checkProxyTicker := time.NewTicker(checkInterval)
+
 	for {
 		select {
 		case <-checkProxyTicker.C:
 			// check which proxy registered failed and reregister it to server
 			ctl.pm.CheckAndStartProxy([]string{ProxyStatusStartErr, ProxyStatusClosed})
-		case _, ok := <-ctl.closedCh:
-			// we won't get any variable from this channel
-			if !ok {
-				// close related channels and wait until other goroutines done
-				close(ctl.readCh)
-				ctl.readerShutdown.WaitDone()
-				ctl.msgHandlerShutdown.WaitDone()
-
-				close(ctl.sendCh)
-				ctl.writerShutdown.WaitDone()
-
-				ctl.pm.CloseProxies()
-				// if ctl.exit is true, just exit
-				ctl.mu.RLock()
-				exit := ctl.exit
-				ctl.mu.RUnlock()
-				if exit {
-					return
-				}
-
-				// loop util reconnecting to server success
-				for {
-					ctl.Info("try to reconnect to server...")
-					err = ctl.login()
-					if err != nil {
-						ctl.Warn("reconnect to server error: %v", err)
-						time.Sleep(delayTime)
-						delayTime = delayTime * 2
-						if delayTime > maxDelayTime {
-							delayTime = maxDelayTime
-						}
-						continue
-					}
-					// reconnect success, init delayTime
-					delayTime = time.Second
-					break
-				}
-
-				// init related channels and variables
-				ctl.sendCh = make(chan msg.Message, 100)
-				ctl.readCh = make(chan msg.Message, 100)
-				ctl.closedCh = make(chan int)
-				ctl.readerShutdown = shutdown.New()
-				ctl.writerShutdown = shutdown.New()
-				ctl.msgHandlerShutdown = shutdown.New()
-				ctl.pm.Reset(ctl.sendCh, ctl.runId)
-
-				// previous work goroutines should be closed and start them here
-				go ctl.msgHandler()
-				go ctl.writer()
-				go ctl.reader()
-
-				// start all configured proxies
-				ctl.pm.CheckAndStartProxy([]string{ProxyStatusNew, ProxyStatusClosed})
-
-				checkProxyTicker.Stop()
-				checkProxyTicker = time.NewTicker(checkInterval)
-			}
+		case <-ctl.closedCh:
+			// close related channels and wait until other goroutines done
+			close(ctl.readCh)
+			ctl.readerShutdown.WaitDone()
+			ctl.msgHandlerShutdown.WaitDone()
+
+			close(ctl.sendCh)
+			ctl.writerShutdown.WaitDone()
+
+			ctl.pm.CloseProxies()
+
+			close(ctl.closedDoneCh)
+			return
 		}
 	}
 }
 
-func (ctl *Control) reloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+func (ctl *Control) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
 	ctl.vm.Reload(visitorCfgs)
 	err := ctl.pm.Reload(pxyCfgs, true)
 	return err

+ 1 - 1
client/proxy.go

@@ -35,7 +35,7 @@ import (
 	"github.com/fatedier/golib/pool"
 )
 
-// Proxy defines how to deal with work connections for different proxy type.
+// Proxy defines how to handle work connections for different proxy type.
 type Proxy interface {
 	Run() error
 

+ 116 - 116
client/proxy_manager.go

@@ -22,7 +22,6 @@ const (
 )
 
 type ProxyManager struct {
-	ctl     *Control
 	sendCh  chan (msg.Message)
 	proxies map[string]*ProxyWrapper
 	closed  bool
@@ -31,122 +30,8 @@ type ProxyManager struct {
 	log.Logger
 }
 
-type ProxyWrapper struct {
-	Name   string
-	Type   string
-	Status string
-	Err    string
-	Cfg    config.ProxyConf
-
-	RemoteAddr string
-
-	pxy Proxy
-
-	mu sync.RWMutex
-}
-
-type ProxyStatus struct {
-	Name   string           `json:"name"`
-	Type   string           `json:"type"`
-	Status string           `json:"status"`
-	Err    string           `json:"err"`
-	Cfg    config.ProxyConf `json:"cfg"`
-
-	// Got from server.
-	RemoteAddr string `json:"remote_addr"`
-}
-
-func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
-	return &ProxyWrapper{
-		Name:   cfg.GetBaseInfo().ProxyName,
-		Type:   cfg.GetBaseInfo().ProxyType,
-		Status: ProxyStatusNew,
-		Cfg:    cfg,
-		pxy:    nil,
-	}
-}
-
-func (pw *ProxyWrapper) GetStatusStr() string {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	return pw.Status
-}
-
-func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
-	pw.mu.RLock()
-	defer pw.mu.RUnlock()
-	ps := &ProxyStatus{
-		Name:       pw.Name,
-		Type:       pw.Type,
-		Status:     pw.Status,
-		Err:        pw.Err,
-		Cfg:        pw.Cfg,
-		RemoteAddr: pw.RemoteAddr,
-	}
-	return ps
-}
-
-func (pw *ProxyWrapper) WaitStart() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.Status = ProxyStatusWaitStart
-}
-
-func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-
-	if serverRespErr != "" {
-		pw.mu.Lock()
-		pw.Status = ProxyStatusStartErr
-		pw.RemoteAddr = remoteAddr
-		pw.Err = serverRespErr
-		pw.mu.Unlock()
-		return fmt.Errorf(serverRespErr)
-	}
-
-	pxy := NewProxy(pw.Cfg)
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	pw.RemoteAddr = remoteAddr
-	if err := pxy.Run(); err != nil {
-		pw.Status = ProxyStatusStartErr
-		pw.Err = err.Error()
-		return err
-	}
-	pw.Status = ProxyStatusRunning
-	pw.Err = ""
-	pw.pxy = pxy
-	return nil
-}
-
-func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
-	pw.mu.RLock()
-	pxy := pw.pxy
-	pw.mu.RUnlock()
-	if pxy != nil {
-		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
-		go pxy.InWorkConn(workConn)
-	} else {
-		workConn.Close()
-	}
-}
-
-func (pw *ProxyWrapper) Close() {
-	pw.mu.Lock()
-	defer pw.mu.Unlock()
-	if pw.pxy != nil {
-		pw.pxy.Close()
-		pw.pxy = nil
-	}
-	pw.Status = ProxyStatusClosed
-}
-
-func NewProxyManager(ctl *Control, msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
+func NewProxyManager(msgSendCh chan (msg.Message), logPrefix string) *ProxyManager {
 	return &ProxyManager{
-		ctl:     ctl,
 		proxies: make(map[string]*ProxyWrapper),
 		sendCh:  msgSendCh,
 		closed:  false,
@@ -309,3 +194,118 @@ func (pm *ProxyManager) GetAllProxyStatus() []*ProxyStatus {
 	}
 	return ps
 }
+
+type ProxyStatus struct {
+	Name   string           `json:"name"`
+	Type   string           `json:"type"`
+	Status string           `json:"status"`
+	Err    string           `json:"err"`
+	Cfg    config.ProxyConf `json:"cfg"`
+
+	// Got from server.
+	RemoteAddr string `json:"remote_addr"`
+}
+
+// ProxyWrapper is a wrapper of Proxy interface only used in ProxyManager
+// Add additional proxy status info
+type ProxyWrapper struct {
+	Name   string
+	Type   string
+	Status string
+	Err    string
+	Cfg    config.ProxyConf
+
+	RemoteAddr string
+
+	pxy Proxy
+
+	mu sync.RWMutex
+}
+
+func NewProxyWrapper(cfg config.ProxyConf) *ProxyWrapper {
+	return &ProxyWrapper{
+		Name:   cfg.GetBaseInfo().ProxyName,
+		Type:   cfg.GetBaseInfo().ProxyType,
+		Status: ProxyStatusNew,
+		Cfg:    cfg,
+		pxy:    nil,
+	}
+}
+
+func (pw *ProxyWrapper) GetStatusStr() string {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	return pw.Status
+}
+
+func (pw *ProxyWrapper) GetStatus() *ProxyStatus {
+	pw.mu.RLock()
+	defer pw.mu.RUnlock()
+	ps := &ProxyStatus{
+		Name:       pw.Name,
+		Type:       pw.Type,
+		Status:     pw.Status,
+		Err:        pw.Err,
+		Cfg:        pw.Cfg,
+		RemoteAddr: pw.RemoteAddr,
+	}
+	return ps
+}
+
+func (pw *ProxyWrapper) WaitStart() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.Status = ProxyStatusWaitStart
+}
+
+func (pw *ProxyWrapper) Start(remoteAddr string, serverRespErr string) error {
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+
+	if serverRespErr != "" {
+		pw.mu.Lock()
+		pw.Status = ProxyStatusStartErr
+		pw.RemoteAddr = remoteAddr
+		pw.Err = serverRespErr
+		pw.mu.Unlock()
+		return fmt.Errorf(serverRespErr)
+	}
+
+	pxy := NewProxy(pw.Cfg)
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	pw.RemoteAddr = remoteAddr
+	if err := pxy.Run(); err != nil {
+		pw.Status = ProxyStatusStartErr
+		pw.Err = err.Error()
+		return err
+	}
+	pw.Status = ProxyStatusRunning
+	pw.Err = ""
+	pw.pxy = pxy
+	return nil
+}
+
+func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) {
+	pw.mu.RLock()
+	pxy := pw.pxy
+	pw.mu.RUnlock()
+	if pxy != nil {
+		workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
+		go pxy.InWorkConn(workConn)
+	} else {
+		workConn.Close()
+	}
+}
+
+func (pw *ProxyWrapper) Close() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.pxy != nil {
+		pw.pxy.Close()
+		pw.pxy = nil
+	}
+	pw.Status = ProxyStatusClosed
+}

+ 171 - 8
client/service.go

@@ -15,35 +15,85 @@
 package client
 
 import (
+	"fmt"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
 	"github.com/fatedier/frp/g"
 	"github.com/fatedier/frp/models/config"
+	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
+	frpNet "github.com/fatedier/frp/utils/net"
+	"github.com/fatedier/frp/utils/util"
+	"github.com/fatedier/frp/utils/version"
+
+	fmux "github.com/hashicorp/yamux"
 )
 
 type Service struct {
+	// uniq id got from frps, attach it in loginMsg
+	runId string
+
 	// manager control connection with server
-	ctl *Control
+	ctl   *Control
+	ctlMu sync.RWMutex
 
+	pxyCfgs     map[string]config.ProxyConf
+	visitorCfgs map[string]config.VisitorConf
+	cfgMu       sync.RWMutex
+
+	exit     uint32 // 0 means not exit
 	closedCh chan int
 }
 
 func NewService(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) (svr *Service) {
 	svr = &Service{
-		closedCh: make(chan int),
+		pxyCfgs:     pxyCfgs,
+		visitorCfgs: visitorCfgs,
+		exit:        0,
+		closedCh:    make(chan int),
 	}
-	ctl := NewControl(svr, pxyCfgs, visitorCfgs)
-	svr.ctl = ctl
 	return
 }
 
+func (svr *Service) GetController() *Control {
+	svr.ctlMu.RLock()
+	defer svr.ctlMu.RUnlock()
+	return svr.ctl
+}
+
 func (svr *Service) Run() error {
-	err := svr.ctl.Run()
-	if err != nil {
-		return err
+	// first login
+	for {
+		conn, session, err := svr.login()
+		if err != nil {
+			log.Warn("login to server failed: %v", err)
+
+			// if login_fail_exit is true, just exit this program
+			// otherwise sleep a while and try again to connect to server
+			if g.GlbClientCfg.LoginFailExit {
+				return err
+			} else {
+				time.Sleep(10 * time.Second)
+			}
+		} else {
+			// login success
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
 	}
 
+	go svr.keepControllerWorking()
+
 	if g.GlbClientCfg.AdminPort != 0 {
-		err = svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
+		err := svr.RunAdminServer(g.GlbClientCfg.AdminAddr, g.GlbClientCfg.AdminPort)
 		if err != nil {
 			log.Warn("run admin server error: %v", err)
 		}
@@ -54,6 +104,119 @@ func (svr *Service) Run() error {
 	return nil
 }
 
+func (svr *Service) keepControllerWorking() {
+	maxDelayTime := 20 * time.Second
+	delayTime := time.Second
+
+	for {
+		<-svr.ctl.ClosedDoneCh()
+		if atomic.LoadUint32(&svr.exit) != 0 {
+			return
+		}
+
+		for {
+			log.Info("try to reconnect to server...")
+			conn, session, err := svr.login()
+			if err != nil {
+				log.Warn("reconnect to server error: %v", err)
+				time.Sleep(delayTime)
+				delayTime = delayTime * 2
+				if delayTime > maxDelayTime {
+					delayTime = maxDelayTime
+				}
+				continue
+			}
+			// reconnect success, init delayTime
+			delayTime = time.Second
+
+			ctl := NewControl(svr.runId, conn, session, svr.pxyCfgs, svr.visitorCfgs)
+			ctl.Run()
+			svr.ctlMu.Lock()
+			svr.ctl = ctl
+			svr.ctlMu.Unlock()
+			break
+		}
+	}
+}
+
+// login creates a connection to frps and registers itself as a client
+// conn: control connection
+// session: if it's not nil, using tcp mux
+func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error) {
+	conn, err = frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
+		fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort))
+	if err != nil {
+		return
+	}
+
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	if g.GlbClientCfg.TcpMux {
+		fmuxCfg := fmux.DefaultConfig()
+		fmuxCfg.LogOutput = ioutil.Discard
+		session, err = fmux.Client(conn, fmuxCfg)
+		if err != nil {
+			return
+		}
+		stream, errRet := session.OpenStream()
+		if errRet != nil {
+			session.Close()
+			err = errRet
+			return
+		}
+		conn = frpNet.WrapConn(stream)
+	}
+
+	now := time.Now().Unix()
+	loginMsg := &msg.Login{
+		Arch:         runtime.GOARCH,
+		Os:           runtime.GOOS,
+		PoolCount:    g.GlbClientCfg.PoolCount,
+		User:         g.GlbClientCfg.User,
+		Version:      version.Full(),
+		PrivilegeKey: util.GetAuthKey(g.GlbClientCfg.Token, now),
+		Timestamp:    now,
+		RunId:        svr.runId,
+	}
+
+	if err = msg.WriteMsg(conn, loginMsg); err != nil {
+		return
+	}
+
+	var loginRespMsg msg.LoginResp
+	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
+	if err = msg.ReadMsgInto(conn, &loginRespMsg); err != nil {
+		return
+	}
+	conn.SetReadDeadline(time.Time{})
+
+	if loginRespMsg.Error != "" {
+		err = fmt.Errorf("%s", loginRespMsg.Error)
+		log.Error("%s", loginRespMsg.Error)
+		return
+	}
+
+	svr.runId = loginRespMsg.RunId
+	g.GlbClientCfg.ServerUdpPort = loginRespMsg.ServerUdpPort
+	log.Info("login to server success, get run id [%s], server udp port [%d]", loginRespMsg.RunId, loginRespMsg.ServerUdpPort)
+	return
+}
+
+func (svr *Service) ReloadConf(pxyCfgs map[string]config.ProxyConf, visitorCfgs map[string]config.VisitorConf) error {
+	svr.cfgMu.Lock()
+	svr.pxyCfgs = pxyCfgs
+	svr.visitorCfgs = visitorCfgs
+	svr.cfgMu.Unlock()
+
+	return svr.ctl.ReloadConf(pxyCfgs, visitorCfgs)
+}
+
 func (svr *Service) Close() {
+	atomic.StoreUint32(&svr.exit, 1)
 	svr.ctl.Close()
+	close(svr.closedCh)
 }

+ 1 - 1
conf/frpc_full.ini

@@ -25,7 +25,7 @@ token = 12345678
 admin_addr = 127.0.0.1
 admin_port = 7400
 admin_user = admin
-admin_passwd = admin
+admin_pwd = admin
 
 # connections will be established in advance, default value is zero
 pool_count = 5