
Merge pull request #1696 from fatedier/dev

bump version to v0.32.0
fatedier committed 5 years ago
commit 23bb76397a
100 changed files with 3618 additions and 5693 deletions
  1. 1 0
      .gitignore
  2. 1 0
      .travis.yml
  3. 1 0
      Makefile
  4. 107 7
      README.md
  5. 101 3
      README_zh.md
  6. 27 2
      client/control.go
  7. 34 0
      client/proxy/proxy.go
  8. 20 13
      client/service.go
  9. 6 1
      cmd/frpc/sub/root.go
  10. 91 0
      cmd/frpc/sub/tcpmux.go
  11. 1 0
      cmd/frps/main.go
  12. 5 1
      cmd/frps/root.go
  13. 7 0
      conf/frpc_full.ini
  14. 41 0
      conf/frps_full.ini
  15. 6 2
      go.mod
  16. 95 5
      go.sum
  17. 151 0
      models/auth/auth.go
  18. 255 0
      models/auth/oidc.go
  19. 120 0
      models/auth/token.go
  20. 5 9
      models/config/client_common.go
  21. 80 1
      models/config/proxy.go
  22. 76 38
      models/config/server_common.go
  23. 14 6
      models/consts/consts.go
  24. 93 0
      models/metrics/aggregate/server.go
  25. 262 0
      models/metrics/mem/server.go
  26. 1 48
      models/metrics/mem/types.go
  27. 8 0
      models/metrics/metrics.go
  28. 95 0
      models/metrics/prometheus/server.go
  29. 10 1
      models/msg/msg.go
  30. 1 0
      models/plugin/server/http.go
  31. 23 23
      server/control.go
  32. 4 0
      server/controller/resource.go
  33. 6 0
      server/dashboard.go
  34. 13 4
      server/dashboard_api.go
  35. 37 0
      server/metrics/metrics.go
  36. 6 11
      server/proxy/http.go
  37. 32 32
      server/proxy/proxy.go
  38. 95 0
      server/proxy/tcpmux.go
  39. 11 9
      server/proxy/udp.go
  40. 53 20
      server/service.go
  41. 0 277
      server/stats/internal.go
  42. 72 0
      tests/ci/auth_test.go
  43. 7 0
      tests/ci/auto_test_frpc.ini
  44. 1 0
      tests/ci/auto_test_frps.ini
  45. 11 0
      tests/ci/normal_test.go
  46. 92 0
      tests/ci/tls_test.go
  47. 2 0
      tests/consts/consts.go
  48. 17 13
      utils/metric/metrics.go
  49. 6 1
      utils/net/tls.go
  50. 68 0
      utils/tcpmux/httpconnect.go
  51. 44 0
      utils/util/http.go
  52. 8 0
      utils/util/util.go
  53. 1 1
      utils/version/version.go
  54. 4 14
      utils/vhost/http.go
  55. 1 1
      utils/vhost/https.go
  56. 12 2
      utils/vhost/vhost.go
  57. 0 22
      vendor/github.com/armon/go-socks5/.gitignore
  58. 0 4
      vendor/github.com/armon/go-socks5/.travis.yml
  59. 0 20
      vendor/github.com/armon/go-socks5/LICENSE
  60. 0 45
      vendor/github.com/armon/go-socks5/README.md
  61. 0 151
      vendor/github.com/armon/go-socks5/auth.go
  62. 0 17
      vendor/github.com/armon/go-socks5/credentials.go
  63. 0 364
      vendor/github.com/armon/go-socks5/request.go
  64. 0 23
      vendor/github.com/armon/go-socks5/resolver.go
  65. 0 41
      vendor/github.com/armon/go-socks5/ruleset.go
  66. 0 169
      vendor/github.com/armon/go-socks5/socks5.go
  67. 2 0
      vendor/github.com/coreos/go-oidc/.gitignore
  68. 16 0
      vendor/github.com/coreos/go-oidc/.travis.yml
  69. 71 0
      vendor/github.com/coreos/go-oidc/CONTRIBUTING.md
  70. 36 35
      vendor/github.com/coreos/go-oidc/DCO
  71. 2 1
      vendor/github.com/coreos/go-oidc/LICENSE
  72. 3 0
      vendor/github.com/coreos/go-oidc/MAINTAINERS
  73. 5 0
      vendor/github.com/coreos/go-oidc/NOTICE
  74. 72 0
      vendor/github.com/coreos/go-oidc/README.md
  75. 61 0
      vendor/github.com/coreos/go-oidc/code-of-conduct.md
  76. 20 0
      vendor/github.com/coreos/go-oidc/jose.go
  77. 228 0
      vendor/github.com/coreos/go-oidc/jwks.go
  78. 409 0
      vendor/github.com/coreos/go-oidc/oidc.go
  79. 16 0
      vendor/github.com/coreos/go-oidc/test
  80. 336 0
      vendor/github.com/coreos/go-oidc/verify.go
  81. 0 15
      vendor/github.com/davecgh/go-spew/LICENSE
  82. 0 152
      vendor/github.com/davecgh/go-spew/spew/bypass.go
  83. 0 38
      vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
  84. 0 341
      vendor/github.com/davecgh/go-spew/spew/common.go
  85. 0 306
      vendor/github.com/davecgh/go-spew/spew/config.go
  86. 0 211
      vendor/github.com/davecgh/go-spew/spew/doc.go
  87. 0 509
      vendor/github.com/davecgh/go-spew/spew/dump.go
  88. 0 419
      vendor/github.com/davecgh/go-spew/spew/format.go
  89. 0 148
      vendor/github.com/davecgh/go-spew/spew/spew.go
  90. 0 13
      vendor/github.com/fatedier/beego/LICENSE
  91. 0 63
      vendor/github.com/fatedier/beego/logs/README.md
  92. 0 28
      vendor/github.com/fatedier/beego/logs/color.go
  93. 0 428
      vendor/github.com/fatedier/beego/logs/color_windows.go
  94. 0 117
      vendor/github.com/fatedier/beego/logs/conn.go
  95. 0 102
      vendor/github.com/fatedier/beego/logs/console.go
  96. 0 327
      vendor/github.com/fatedier/beego/logs/file.go
  97. 0 78
      vendor/github.com/fatedier/beego/logs/jianliao.go
  98. 0 657
      vendor/github.com/fatedier/beego/logs/log.go
  99. 0 188
      vendor/github.com/fatedier/beego/logs/logger.go
  100. 0 116
      vendor/github.com/fatedier/beego/logs/multifile.go

+ 1 - 0
.gitignore

@@ -27,6 +27,7 @@ _testmain.go
 bin/
 packages/
 test/bin/
+vendor/
 
 # Cache
 *.swp

+ 1 - 0
.travis.yml

@@ -3,6 +3,7 @@ language: go
 
 go:
     - 1.12.x
+    - 1.13.x
 
 install:
     - make

+ 1 - 0
Makefile

@@ -1,4 +1,5 @@
 export PATH := $(GOPATH)/bin:$(PATH)
+export GO111MODULE=on
 
 all: fmt build
 

+ 107 - 7
README.md

@@ -30,7 +30,11 @@ frp also has a P2P connect mode.
     * [Using Environment Variables](#using-environment-variables)
     * [Dashboard](#dashboard)
     * [Admin UI](#admin-ui)
+    * [Monitor](#monitor)
+        * [Prometheus](#prometheus)
     * [Authenticating the Client](#authenticating-the-client)
+        * [Token Authentication](#token-authentication)
+        * [OIDC Authentication](#oidc-authentication)
     * [Encryption and Compression](#encryption-and-compression)
         * [TLS](#tls)
     * [Hot-Reloading frpc configuration](#hot-reloading-frpc-configuration)
@@ -49,9 +53,10 @@ frp also has a P2P connect mode.
     * [Get Real IP](#get-real-ip)
         * [HTTP X-Forwarded-For](#http-x-forwarded-for)
         * [Proxy Protocol](#proxy-protocol)
-    * [Require HTTP Basic auth (password) for web services](#require-http-basic-auth-password-for-web-services)
-    * [Custom subdomain names](#custom-subdomain-names)
-    * [URL routing](#url-routing)
+    * [Require HTTP Basic Auth (Password) for Web Services](#require-http-basic-auth-password-for-web-services)
+    * [Custom Subdomain Names](#custom-subdomain-names)
+    * [URL Routing](#url-routing)
+    * [TCP Port Multiplexing](#tcp-port-multiplexing)
     * [Connecting to frps via HTTP PROXY](#connecting-to-frps-via-http-proxy)
     * [Range ports mapping](#range-ports-mapping)
     * [Client Plugins](#client-plugins)
@@ -435,9 +440,59 @@ admin_pwd = admin
 
 Then visit `http://127.0.0.1:7400` to see admin UI, with username and password both being `admin` by default.
 
+### Monitor
+
+When the dashboard is enabled, frps keeps monitoring data in an in-memory cache; the data is cleared whenever the process restarts.
+
+Prometheus is also supported.
+
+#### Prometheus
+
+Enable dashboard first, then configure `enable_prometheus = true` in `frps.ini`.
+
+`http://{dashboard_addr}/metrics` will provide prometheus monitor data.
+
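The Prometheus support comes from `github.com/prometheus/client_golang`, which this PR adds to `go.mod`. As a rough sketch of what serving `/metrics` with that library involves (the metric name, labels and listen address below are invented for illustration, not frps's actual ones):

```go
// Minimal sketch (assumption): registering a counter and serving it with
// github.com/prometheus/client_golang. Not frps's real metrics code.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var newConnections = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: "frp",
	Subsystem: "server",
	Name:      "new_connections_total",
	Help:      "Example counter for new proxy connections.",
}, []string{"name", "type"})

func main() {
	prometheus.MustRegister(newConnections)
	newConnections.WithLabelValues("proxy1", "tcp").Inc() // record one connection

	// Everything registered above is rendered in Prometheus text format here.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":7500", nil)
}
```

In frps the handler is exposed on the dashboard listener, which is why the dashboard has to be enabled before `enable_prometheus` takes effect.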
 ### Authenticating the Client
 
-Always use the same `token` in the `[common]` section in `frps.ini` and `frpc.ini`.
+There are two authentication methods for authenticating frpc with frps.
+
+You can decide which one to use by configuring `authentication_method` under `[common]` in `frpc.ini` and `frps.ini`.
+
+Configuring `authenticate_heartbeats = true` under `[common]` will use the configured authentication method to add and validate authentication on every heartbeat between frpc and frps.
+
+Configuring `authenticate_new_work_conns = true` under `[common]` will do the same for every new work connection between frpc and frps.
+
+#### Token Authentication
+
+When `authentication_method = token` is specified under `[common]` in `frpc.ini` and `frps.ini`, token-based authentication will be used.
+
+Make sure to specify the same `token` in the `[common]` section of both `frps.ini` and `frpc.ini` so that frpc passes frps validation.
+
+#### OIDC Authentication
+
+When `authentication_method = oidc` is specified under `[common]` in `frpc.ini` and `frps.ini`, OIDC-based authentication will be used.
+
+OIDC stands for OpenID Connect, and the flow used is the [Client Credentials Grant](https://tools.ietf.org/html/rfc6749#section-4.4).
+
+To use this authentication method, configure `frpc.ini` and `frps.ini` as follows:
+
+```ini
+# frps.ini
+[common]
+authentication_method = oidc
+oidc_issuer = https://example-oidc-issuer.com/
+oidc_audience = https://oidc-audience.com/.default
+```
+
+```ini
+# frpc.ini
+[common]
+authentication_method = oidc
+oidc_client_id = 98692467-37de-409a-9fac-bb2585826f18 # Replace with OIDC client ID
+oidc_client_secret = oidc_secret
+oidc_audience = https://oidc-audience.com/.default
+oidc_token_endpoint_url = https://example-oidc-endpoint.com/oauth2/v2.0/token
+```
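
For orientation, the Client Credentials Grant can be sketched with `golang.org/x/oauth2/clientcredentials`, which this PR adds to `go.mod`. This is a hedged illustration of the flow using the example values above, not frpc's exact code; in particular, passing the audience via `EndpointParams` is an assumption:

```go
// Hedged sketch of the OIDC Client Credentials Grant with
// golang.org/x/oauth2/clientcredentials (added to go.mod by this PR).
// Values mirror the frpc.ini example above; how frpc actually conveys the
// audience to the token endpoint is an assumption.
package main

import (
	"context"
	"fmt"
	"net/url"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	cfg := clientcredentials.Config{
		ClientID:     "98692467-37de-409a-9fac-bb2585826f18",
		ClientSecret: "oidc_secret",
		TokenURL:     "https://example-oidc-endpoint.com/oauth2/v2.0/token",
		EndpointParams: url.Values{
			"audience": {"https://oidc-audience.com/.default"},
		},
	}

	// Exchanges the client ID and secret for a bearer token; frpc attaches a
	// token like this to its login, and frps checks it against oidc_issuer
	// and oidc_audience.
	token, err := cfg.Token(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(token.AccessToken)
}
```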
 
 
 ### Encryption and Compression
 ### Encryption and Compression
 
 
@@ -461,6 +516,8 @@ Config `tls_enable = true` in the `[common]` section to `frpc.ini` to enable thi
 
 For port multiplexing, frp sends a first byte `0x17` to dial a TLS connection.
 
+To force `frps` to accept only TLS connections, configure `tls_only = true` in the `[common]` section of `frps.ini`.
+
 ### Hot-Reloading frpc configuration
 
 The `admin_addr` and `admin_port` fields are required for enabling HTTP API:
@@ -712,7 +769,7 @@ proxy_protocol_version = v2
 
 You can enable Proxy Protocol support in nginx to expose user's real IP in HTTP header `X-Real-IP`, and then read `X-Real-IP` header in your web service for the real IP.
 
-### Require HTTP Basic auth (password) for web services
+### Require HTTP Basic Auth (Password) for Web Services
 
 Anyone who can guess your tunnel URL can access your local web server unless you protect it with a password.
 
@@ -732,7 +789,7 @@ http_pwd = abc
 
 Visit `http://test.example.com` in the browser and now you are prompted to enter the username and password.
 
-### Custom subdomain names
+### Custom Subdomain Names
 
 It is convenient to use `subdomain` configure for http and https types when many people share one frps server.
 
@@ -755,7 +812,7 @@ Now you can visit your web service on `test.frps.com`.
 
 Note that if `subdomain_host` is not empty, `custom_domains` should not be the subdomain of `subdomain_host`.
 
-### URL routing
+### URL Routing
 
 frp supports forwarding HTTP requests to different backend web services by url routing.
 
@@ -778,6 +835,49 @@ locations = /news,/about
 
 HTTP requests with URL prefix `/news` or `/about` will be forwarded to **web02** and other requests to **web01**.
 
+### TCP Port Multiplexing
+
+frp supports routing TCP connections that arrive on a single frps port to different proxies, similar to `vhost_http_port` and `vhost_https_port`.
+
+The only TCP port multiplexing method currently supported is `httpconnect` (HTTP CONNECT tunnel).
+
+When `tcpmux_httpconnect_port` is set to anything other than 0 under `[common]` in frps, frps will listen on that port for HTTP CONNECT requests.
+
+The host of the HTTP CONNECT request is used to match a proxy in frps. Proxy hosts can be configured in frpc by setting `custom_domains` and/or `subdomain` on proxies with `type = tcpmux` and `multiplexer = httpconnect`.
+
+For example:
+
+```ini
+# frps.ini
+[common]
+bind_port = 7000
+tcpmux_httpconnect_port = 1337
+```
+
+```ini
+# frpc.ini
+[common]
+server_addr = x.x.x.x
+server_port = 7000
+
+[proxy1]
+type = tcpmux
+multiplexer = httpconnect
+custom_domains = test1
+
+[proxy2]
+type = tcpmux
+multiplexer = httpconnect
+custom_domains = test2
+```
+
+With the above configuration, frps can be contacted on port 1337 with an HTTP CONNECT request such as:
+
+```
+CONNECT test1 HTTP/1.1\r\n\r\n
+```
+and the connection will be routed to `proxy1`.
+
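To make the routing concrete, here is a small sketch (not part of the PR) of a client opening such a tunnel by hand against the example configuration above; that frps answers with an HTTP 200 before the tunnel becomes usable is an assumption:

```go
// Hedged sketch: dial the frps tcpmux port and request a tunnel to the proxy
// whose custom_domains/subdomain matches the CONNECT target ("test1").
package main

import (
	"bufio"
	"fmt"
	"net"
	"net/http"
)

func main() {
	conn, err := net.Dial("tcp", "x.x.x.x:1337") // frps tcpmux_httpconnect_port
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Same request shape as the example above.
	fmt.Fprintf(conn, "CONNECT test1 HTTP/1.1\r\n\r\n")

	// Assumption: a success response arrives before the tunnel is usable.
	resp, err := http.ReadResponse(bufio.NewReader(conn), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
	// From here on, read from and write to conn as if connected to proxy1's
	// local service.
}
```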
 ### Connecting to frps via HTTP PROXY
 
 frpc can connect to frps using HTTP proxy if you set OS environment variable `HTTP_PROXY`, or if `http_proxy` is set in frpc.ini file.

+ 101 - 3
README_zh.md

@@ -26,7 +26,11 @@ frp 是一个可用于内网穿透的高性能的反向代理应用,支持 tcp
     * [配置文件模版渲染](#配置文件模版渲染)
     * [配置文件模版渲染](#配置文件模版渲染)
     * [Dashboard](#dashboard)
     * [Dashboard](#dashboard)
     * [Admin UI](#admin-ui)
     * [Admin UI](#admin-ui)
-    * [身份验证](#身份验证)
+    * [监控](#监控)
+        * [Prometheus](#prometheus)
+    * [客户端身份验证](#客户端身份验证)
+        * [Token](#token)
+        * [OIDC](#oidc)
     * [加密与压缩](#加密与压缩)
     * [加密与压缩](#加密与压缩)
         * [TLS](#tls)
         * [TLS](#tls)
     * [客户端热加载配置文件](#客户端热加载配置文件)
     * [客户端热加载配置文件](#客户端热加载配置文件)
@@ -48,6 +52,7 @@ frp 是一个可用于内网穿透的高性能的反向代理应用,支持 tcp
     * [通过密码保护你的 web 服务](#通过密码保护你的-web-服务)
     * [通过密码保护你的 web 服务](#通过密码保护你的-web-服务)
     * [自定义二级域名](#自定义二级域名)
     * [自定义二级域名](#自定义二级域名)
     * [URL 路由](#url-路由)
     * [URL 路由](#url-路由)
+    * [TCP 端口复用类型](#tcp-端口复用类型)
     * [通过代理连接 frps](#通过代理连接-frps)
     * [通过代理连接 frps](#通过代理连接-frps)
     * [范围端口映射](#范围端口映射)
     * [范围端口映射](#范围端口映射)
     * [客户端插件](#客户端插件)
     * [客户端插件](#客户端插件)
@@ -459,9 +464,56 @@ admin_pwd = admin
 
 
 如果想要在外网环境访问 Admin UI,将 7400 端口映射出去即可,但需要重视安全风险。
 如果想要在外网环境访问 Admin UI,将 7400 端口映射出去即可,但需要重视安全风险。
 
 
-### 身份验证
+### 监控
 
 
-服务端和客户端的 common 配置中的 `token` 参数一致则身份验证通过。
+frps 当启用 Dashboard 后,会默认开启内部的监控,数据存放在内存中,每次重启进程后会清空,监控数据可以通过 dashboard 的地址发送 HTTP 请求获取。
+
+目前还支持 Prometheus 作为可选的监控系统。
+
+#### Prometheus
+
+在 `frps.ini` 中启用 Dashboard,并且设置 `enable_prometheus = true`,则通过 `http://{dashboard_addr}/metrics` 可以获取到 Prometheus 的监控数据。
+
+### 客户端身份验证
+
+目前 frpc 和 frps 之间支持两种身份验证方式,`token` 和 `oidc`。
+
+通过 `frpc.ini` 和 `frps.ini` 中 `[common]` section 的 `authentication_method` 参数配置需要使用的验证方法。
+
+`authenticate_heartbeats = true` 将会在每一个心跳包中附加上鉴权信息。
+
+`authenticate_new_work_conns = true` 将会在每次建立新的工作连接时附加上鉴权信息。
+
+#### Token
+
+当 `authentication_method = token`,将会启用基于 token 的验证方式。
+
+需要在 `frpc.ini` 和 `frps.ini` 的 `[common]` section 中设置相同的 `token`。
+
+#### OIDC
+
+当 `authentication_method = oidc`,将会启用基于 OIDC 的身份验证。
+
+验证流程参考 [Client Credentials Grant](https://tools.ietf.org/html/rfc6749#section-4.4)
+
+启用这一验证方式,配置 `frpc.ini` 和 `frps.ini` 如下:
+
+```ini
+# frps.ini
+[common]
+authentication_method = oidc
+oidc_issuer = https://example-oidc-issuer.com/
+oidc_audience = https://oidc-audience.com/.default
+```
+
+```ini
+[common]
+authentication_method = oidc
+oidc_client_id = 98692467-37de-409a-9fac-bb2585826f18 # Replace with OIDC client ID
+oidc_client_secret = oidc_secret
+oidc_audience = https://oidc-audience.com/.default
+oidc_token_endpoint_url = https://example-oidc-endpoint.com/oauth2/v2.0/token
+```
 
 
 ### 加密与压缩
 ### 加密与压缩
 
 
@@ -487,6 +539,8 @@ use_compression = true
 
 
 为了端口复用,frp 建立 TLS 连接的第一个字节为 0x17。
 为了端口复用,frp 建立 TLS 连接的第一个字节为 0x17。
 
 
+通过将 frps.ini 的 `[common]` 中 `tls_only` 设置为 true,可以强制 frps 只接受 TLS 连接。
+
 **注意: 启用此功能后除 xtcp 外,不需要再设置 use_encryption。**
 **注意: 启用此功能后除 xtcp 外,不需要再设置 use_encryption。**
 
 
 ### 客户端热加载配置文件
 ### 客户端热加载配置文件
@@ -824,6 +878,50 @@ locations = /news,/about
 
 
 按照上述的示例配置后,`web.yourdomain.com` 这个域名下所有以 `/news` 以及 `/about` 作为前缀的 URL 请求都会被转发到 web02,其余的请求会被转发到 web01。
 按照上述的示例配置后,`web.yourdomain.com` 这个域名下所有以 `/news` 以及 `/about` 作为前缀的 URL 请求都会被转发到 web02,其余的请求会被转发到 web01。
 
 
+### TCP 端口复用类型
+
+frp 支持将单个端口收到的连接路由到不同的代理,类似 `vhost_http_port` 和 `vhost_https_port`。
+
+目前支持的复用器只有 `httpconnect`。
+
+当在 `frps.ini` 的 `[common]` 中设置 `tcpmux_httpconnect_port`,frps 将会监听在这个端口,接收 HTTP CONNECT 请求。
+
+frps 会根据 HTTP CONNECT 请求中的 host 路由到不同的后端代理。
+
+示例配置如下:
+
+```ini
+# frps.ini
+[common]
+bind_port = 7000
+tcpmux_httpconnect_port = 1337
+```
+
+```ini
+# frpc.ini
+[common]
+server_addr = x.x.x.x
+server_port = 7000
+
+[proxy1]
+type = tcpmux
+multiplexer = httpconnect
+custom_domains = test1
+
+[proxy2]
+type = tcpmux
+multiplexer = httpconnect
+custom_domains = test2
+```
+
+通过上面的配置,frps 如果接收到 HTTP CONNECT 请求内容:
+
+```
+CONNECT test1 HTTP/1.1\r\n\r\n
+```
+
+该连接将会被路由到 proxy1 。
+
 ### 通过代理连接 frps
 ### 通过代理连接 frps
 
 
 在只能通过代理访问外网的环境内,frpc 支持通过 HTTP PROXY 和 frps 进行通信。
 在只能通过代理访问外网的环境内,frpc 支持通过 HTTP PROXY 和 frps 进行通信。

+ 27 - 2
client/control.go

@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/fatedier/frp/client/proxy"
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/msg"
 	frpNet "github.com/fatedier/frp/utils/net"
@@ -82,13 +83,17 @@ type Control struct {
 
 	// service context
 	ctx context.Context
+
+	// sets authentication based on selected method
+	authSetter auth.Setter
 }
 
 func NewControl(ctx context.Context, runId string, conn net.Conn, session *fmux.Session,
 	clientCfg config.ClientCommonConf,
 	pxyCfgs map[string]config.ProxyConf,
 	visitorCfgs map[string]config.VisitorConf,
-	serverUDPPort int) *Control {
+	serverUDPPort int,
+	authSetter auth.Setter) *Control {
 
 	// new xlog instance
 	ctl := &Control{
@@ -107,6 +112,7 @@ func NewControl(ctx context.Context, runId string, conn net.Conn, session *fmux.
 		serverUDPPort:      serverUDPPort,
 		xl:                 xlog.FromContextSafe(ctx),
 		ctx:                ctx,
+		authSetter:         authSetter,
 	}
 	ctl.pm = proxy.NewProxyManager(ctl.ctx, ctl.sendCh, clientCfg, serverUDPPort)
 
@@ -136,6 +142,10 @@ func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
 	m := &msg.NewWorkConn{
 		RunId: ctl.runId,
 	}
+	if err = ctl.authSetter.SetNewWorkConn(m); err != nil {
+		xl.Warn("error during NewWorkConn authentication: %v", err)
+		return
+	}
 	if err = msg.WriteMsg(workConn, m); err != nil {
 		xl.Warn("work connection write to server error: %v", err)
 		workConn.Close()
@@ -148,6 +158,11 @@ func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
 		workConn.Close()
 		return
 	}
+	if startMsg.Error != "" {
+		xl.Error("StartWorkConn contains error: %s", startMsg.Error)
+		workConn.Close()
+		return
+	}
 
 	// dispatch this work connection to related proxy
 	ctl.pm.HandleWorkConn(startMsg.ProxyName, workConn, &startMsg)
@@ -282,7 +297,12 @@ func (ctl *Control) msgHandler() {
 		case <-hbSend.C:
 			// send heartbeat to server
 			xl.Debug("send heartbeat to server")
-			ctl.sendCh <- &msg.Ping{}
+			pingMsg := &msg.Ping{}
+			if err := ctl.authSetter.SetPing(pingMsg); err != nil {
+				xl.Warn("error during ping authentication: %v", err)
+				return
+			}
+			ctl.sendCh <- pingMsg
 		case <-hbCheck.C:
 			if time.Since(ctl.lastPong) > time.Duration(ctl.clientCfg.HeartBeatTimeout)*time.Second {
 				xl.Warn("heartbeat timeout")
@@ -301,6 +321,11 @@ func (ctl *Control) msgHandler() {
 			case *msg.NewProxyResp:
 				ctl.HandleNewProxyResp(m)
 			case *msg.Pong:
+				if m.Error != "" {
+					xl.Error("Pong contains error: %s", m.Error)
+					ctl.conn.Close()
+					return
+				}
 				ctl.lastPong = time.Now()
 				xl.Debug("receive heartbeat from server")
 			}

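For readers following the control-flow changes above: the diff threads an `auth.Setter` through `NewControl` and calls `SetNewWorkConn` and `SetPing` on it (plus `SetLogin` in `client/service.go`). The interface below is a hedged reconstruction inferred from those call sites; the token-based implementation is only a sketch of the idea, reusing the `util.GetAuthKey` helper that this PR removes from `service.go`. The real definitions live in the new `models/auth` package and may differ in detail.

```go
// Hedged reconstruction, inferred from call sites in this diff.
package auth

import (
	"time"

	"github.com/fatedier/frp/models/msg"
	"github.com/fatedier/frp/utils/util"
)

// Setter stamps outgoing control messages with authentication data.
type Setter interface {
	SetLogin(*msg.Login) error
	SetPing(*msg.Ping) error
	SetNewWorkConn(*msg.NewWorkConn) error
}

// tokenSetter sketches the token method: each message carries a timestamp and
// a key derived from the shared token and that timestamp, as Login already
// did before this PR.
type tokenSetter struct {
	token string
}

func (s *tokenSetter) SetLogin(m *msg.Login) error {
	m.Timestamp = time.Now().Unix()
	m.PrivilegeKey = util.GetAuthKey(s.token, m.Timestamp)
	return nil
}

func (s *tokenSetter) SetPing(m *msg.Ping) error {
	// Assumes msg.Ping gained Timestamp/PrivilegeKey fields in this PR.
	m.Timestamp = time.Now().Unix()
	m.PrivilegeKey = util.GetAuthKey(s.token, m.Timestamp)
	return nil
}

func (s *tokenSetter) SetNewWorkConn(m *msg.NewWorkConn) error {
	// Same assumption as SetPing for msg.NewWorkConn.
	m.Timestamp = time.Now().Unix()
	m.PrivilegeKey = util.GetAuthKey(s.token, m.Timestamp)
	return nil
}
```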
+ 34 - 0
client/proxy/proxy.go

@@ -72,6 +72,11 @@ func NewProxy(ctx context.Context, pxyConf config.ProxyConf, clientCfg config.Cl
 			BaseProxy: &baseProxy,
 			cfg:       cfg,
 		}
+	case *config.TcpMuxProxyConf:
+		pxy = &TcpMuxProxy{
+			BaseProxy: &baseProxy,
+			cfg:       cfg,
+		}
 	case *config.UdpProxyConf:
 		pxy = &UdpProxy{
 			BaseProxy: &baseProxy,
@@ -141,6 +146,35 @@ func (pxy *TcpProxy) InWorkConn(conn net.Conn, m *msg.StartWorkConn) {
 		conn, []byte(pxy.clientCfg.Token), m)
 }
 
+// TCP Multiplexer
+type TcpMuxProxy struct {
+	*BaseProxy
+
+	cfg         *config.TcpMuxProxyConf
+	proxyPlugin plugin.Plugin
+}
+
+func (pxy *TcpMuxProxy) Run() (err error) {
+	if pxy.cfg.Plugin != "" {
+		pxy.proxyPlugin, err = plugin.Create(pxy.cfg.Plugin, pxy.cfg.PluginParams)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (pxy *TcpMuxProxy) Close() {
+	if pxy.proxyPlugin != nil {
+		pxy.proxyPlugin.Close()
+	}
+}
+
+func (pxy *TcpMuxProxy) InWorkConn(conn net.Conn, m *msg.StartWorkConn) {
+	HandleTcpWorkConnection(pxy.ctx, &pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, pxy.limiter,
+		conn, []byte(pxy.clientCfg.Token), m)
+}
+
 // HTTP
 type HttpProxy struct {
 	*BaseProxy

+ 20 - 13
client/service.go

@@ -26,11 +26,11 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/fatedier/frp/assets"
 	"github.com/fatedier/frp/assets"
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/log"
 	frpNet "github.com/fatedier/frp/utils/net"
 	frpNet "github.com/fatedier/frp/utils/net"
-	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/xlog"
 	"github.com/fatedier/frp/utils/xlog"
 
 
@@ -46,6 +46,9 @@ type Service struct {
 	ctl   *Control
 	ctl   *Control
 	ctlMu sync.RWMutex
 	ctlMu sync.RWMutex
 
 
+	// Sets authentication based on selected method
+	authSetter auth.Setter
+
 	cfg         config.ClientCommonConf
 	cfg         config.ClientCommonConf
 	pxyCfgs     map[string]config.ProxyConf
 	pxyCfgs     map[string]config.ProxyConf
 	visitorCfgs map[string]config.VisitorConf
 	visitorCfgs map[string]config.VisitorConf
@@ -70,6 +73,7 @@ func NewService(cfg config.ClientCommonConf, pxyCfgs map[string]config.ProxyConf
 
 
 	ctx, cancel := context.WithCancel(context.Background())
 	ctx, cancel := context.WithCancel(context.Background())
 	svr = &Service{
 	svr = &Service{
+		authSetter:  auth.NewAuthSetter(cfg.AuthClientConfig),
 		cfg:         cfg,
 		cfg:         cfg,
 		cfgFile:     cfgFile,
 		cfgFile:     cfgFile,
 		pxyCfgs:     pxyCfgs,
 		pxyCfgs:     pxyCfgs,
@@ -105,7 +109,7 @@ func (svr *Service) Run() error {
 			}
 			}
 		} else {
 		} else {
 			// login success
 			// login success
-			ctl := NewControl(svr.ctx, svr.runId, conn, session, svr.cfg, svr.pxyCfgs, svr.visitorCfgs, svr.serverUDPPort)
+			ctl := NewControl(svr.ctx, svr.runId, conn, session, svr.cfg, svr.pxyCfgs, svr.visitorCfgs, svr.serverUDPPort, svr.authSetter)
 			ctl.Run()
 			ctl.Run()
 			svr.ctlMu.Lock()
 			svr.ctlMu.Lock()
 			svr.ctl = ctl
 			svr.ctl = ctl
@@ -159,7 +163,7 @@ func (svr *Service) keepControllerWorking() {
 			// reconnect success, init delayTime
 			// reconnect success, init delayTime
 			delayTime = time.Second
 			delayTime = time.Second
 
 
-			ctl := NewControl(svr.ctx, svr.runId, conn, session, svr.cfg, svr.pxyCfgs, svr.visitorCfgs, svr.serverUDPPort)
+			ctl := NewControl(svr.ctx, svr.runId, conn, session, svr.cfg, svr.pxyCfgs, svr.visitorCfgs, svr.serverUDPPort, svr.authSetter)
 			ctl.Run()
 			ctl.Run()
 			svr.ctlMu.Lock()
 			svr.ctlMu.Lock()
 			svr.ctl = ctl
 			svr.ctl = ctl
@@ -212,17 +216,20 @@ func (svr *Service) login() (conn net.Conn, session *fmux.Session, err error) {
 		conn = stream
 		conn = stream
 	}
 	}
 
 
-	now := time.Now().Unix()
 	loginMsg := &msg.Login{
 	loginMsg := &msg.Login{
-		Arch:         runtime.GOARCH,
-		Os:           runtime.GOOS,
-		PoolCount:    svr.cfg.PoolCount,
-		User:         svr.cfg.User,
-		Version:      version.Full(),
-		PrivilegeKey: util.GetAuthKey(svr.cfg.Token, now),
-		Timestamp:    now,
-		RunId:        svr.runId,
-		Metas:        svr.cfg.Metas,
+		Arch:      runtime.GOARCH,
+		Os:        runtime.GOOS,
+		PoolCount: svr.cfg.PoolCount,
+		User:      svr.cfg.User,
+		Version:   version.Full(),
+		Timestamp: time.Now().Unix(),
+		RunId:     svr.runId,
+		Metas:     svr.cfg.Metas,
+	}
+
+	// Add auth
+	if err = svr.authSetter.SetLogin(loginMsg); err != nil {
+		return
 	}
 	}
 
 
 	if err = msg.WriteMsg(conn, loginMsg); err != nil {
 	if err = msg.WriteMsg(conn, loginMsg); err != nil {

+ 6 - 1
cmd/frpc/sub/root.go

@@ -28,6 +28,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
 
 
 	"github.com/fatedier/frp/client"
 	"github.com/fatedier/frp/client"
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/version"
@@ -65,6 +66,7 @@ var (
 	hostHeaderRewrite string
 	hostHeaderRewrite string
 	role              string
 	role              string
 	sk                string
 	sk                string
+	multiplexer       string
 	serverName        string
 	serverName        string
 	bindAddr          string
 	bindAddr          string
 	bindPort          int
 	bindPort          int
@@ -157,7 +159,6 @@ func parseClientCommonCfgFromCmd() (cfg config.ClientCommonConf, err error) {
 
 
 	cfg.User = user
 	cfg.User = user
 	cfg.Protocol = protocol
 	cfg.Protocol = protocol
-	cfg.Token = token
 	cfg.LogLevel = logLevel
 	cfg.LogLevel = logLevel
 	cfg.LogFile = logFile
 	cfg.LogFile = logFile
 	cfg.LogMaxDays = int64(logMaxDays)
 	cfg.LogMaxDays = int64(logMaxDays)
@@ -168,6 +169,10 @@ func parseClientCommonCfgFromCmd() (cfg config.ClientCommonConf, err error) {
 	}
 	}
 	cfg.DisableLogColor = disableLogColor
 	cfg.DisableLogColor = disableLogColor
 
 
+	// Only token authentication is supported in cmd mode
+	cfg.AuthClientConfig = auth.GetDefaultAuthClientConf()
+	cfg.Token = token
+
 	return
 	return
 }
 }
 
 

+ 91 - 0
cmd/frpc/sub/tcpmux.go

@@ -0,0 +1,91 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sub
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"github.com/fatedier/frp/models/config"
+	"github.com/fatedier/frp/models/consts"
+)
+
+func init() {
+	tcpMuxCmd.PersistentFlags().StringVarP(&serverAddr, "server_addr", "s", "127.0.0.1:7000", "frp server's address")
+	tcpMuxCmd.PersistentFlags().StringVarP(&user, "user", "u", "", "user")
+	tcpMuxCmd.PersistentFlags().StringVarP(&protocol, "protocol", "p", "tcp", "tcp or kcp or websocket")
+	tcpMuxCmd.PersistentFlags().StringVarP(&token, "token", "t", "", "auth token")
+	tcpMuxCmd.PersistentFlags().StringVarP(&logLevel, "log_level", "", "info", "log level")
+	tcpMuxCmd.PersistentFlags().StringVarP(&logFile, "log_file", "", "console", "console or file path")
+	tcpMuxCmd.PersistentFlags().IntVarP(&logMaxDays, "log_max_days", "", 3, "log file reversed days")
+	tcpMuxCmd.PersistentFlags().BoolVarP(&disableLogColor, "disable_log_color", "", false, "disable log color in console")
+
+	tcpMuxCmd.PersistentFlags().StringVarP(&proxyName, "proxy_name", "n", "", "proxy name")
+	tcpMuxCmd.PersistentFlags().StringVarP(&localIp, "local_ip", "i", "127.0.0.1", "local ip")
+	tcpMuxCmd.PersistentFlags().IntVarP(&localPort, "local_port", "l", 0, "local port")
+	tcpMuxCmd.PersistentFlags().StringVarP(&customDomains, "custom_domain", "d", "", "custom domain")
+	tcpMuxCmd.PersistentFlags().StringVarP(&subDomain, "sd", "", "", "sub domain")
+	tcpMuxCmd.PersistentFlags().StringVarP(&multiplexer, "mux", "", "", "multiplexer")
+	tcpMuxCmd.PersistentFlags().BoolVarP(&useEncryption, "ue", "", false, "use encryption")
+	tcpMuxCmd.PersistentFlags().BoolVarP(&useCompression, "uc", "", false, "use compression")
+
+	rootCmd.AddCommand(tcpMuxCmd)
+}
+
+var tcpMuxCmd = &cobra.Command{
+	Use:   "tcpmux",
+	Short: "Run frpc with a single tcpmux proxy",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		clientCfg, err := parseClientCommonCfg(CfgFileTypeCmd, "")
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+
+		cfg := &config.TcpMuxProxyConf{}
+		var prefix string
+		if user != "" {
+			prefix = user + "."
+		}
+		cfg.ProxyName = prefix + proxyName
+		cfg.ProxyType = consts.TcpMuxProxy
+		cfg.LocalIp = localIp
+		cfg.LocalPort = localPort
+		cfg.CustomDomains = strings.Split(customDomains, ",")
+		cfg.SubDomain = subDomain
+		cfg.Multiplexer = multiplexer
+		cfg.UseEncryption = useEncryption
+		cfg.UseCompression = useCompression
+
+		err = cfg.CheckForCli()
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+
+		proxyConfs := map[string]config.ProxyConf{
+			cfg.ProxyName: cfg,
+		}
+		err = startService(clientCfg, proxyConfs, nil, "")
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+		return nil
+	},
+}

+ 1 - 0
cmd/frps/main.go

@@ -21,6 +21,7 @@ import (
 	"github.com/fatedier/golib/crypto"
 
 	_ "github.com/fatedier/frp/assets/frps/statik"
+	_ "github.com/fatedier/frp/models/metrics"
 )
 
 func main() {

+ 5 - 1
cmd/frps/root.go

@@ -20,6 +20,7 @@ import (
 
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/cobra"
 
 
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/server"
 	"github.com/fatedier/frp/server"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/log"
@@ -171,8 +172,11 @@ func parseServerCommonCfgFromCmd() (cfg config.ServerCommonConf, err error) {
 	cfg.LogFile = logFile
 	cfg.LogFile = logFile
 	cfg.LogLevel = logLevel
 	cfg.LogLevel = logLevel
 	cfg.LogMaxDays = logMaxDays
 	cfg.LogMaxDays = logMaxDays
-	cfg.Token = token
 	cfg.SubDomainHost = subDomainHost
 	cfg.SubDomainHost = subDomainHost
+
+	// Only token authentication is supported in cmd mode
+	cfg.AuthServerConfig = auth.GetDefaultAuthServerConf()
+	cfg.Token = token
 	if len(allowPorts) > 0 {
 	if len(allowPorts) > 0 {
 		// e.g. 1000-2000,2001,2002,3000-4000
 		// e.g. 1000-2000,2001,2002,3000-4000
 		ports, errRet := util.ParseRangeNumbers(allowPorts)
 		ports, errRet := util.ParseRangeNumbers(allowPorts)

+ 7 - 0
conf/frpc_full.ini

@@ -264,3 +264,10 @@ bind_addr = 127.0.0.1
 bind_port = 9001
 use_encryption = false
 use_compression = false
+
+[tcpmuxhttpconnect]
+type = tcpmux
+multiplexer = httpconnect
+local_ip = 127.0.0.1
+local_port = 10701
+custom_domains = tunnel1

+ 41 - 0
conf/frps_full.ini

@@ -23,6 +23,12 @@ vhost_https_port = 443
 # response header timeout(seconds) for vhost http server, default is 60s
 # vhost_http_timeout = 60
 
+# TcpMuxHttpConnectPort specifies the port on which the server listens for TCP
+# HTTP CONNECT requests. If the value is 0, the server will not multiplex TCP
+# requests on a single port. Otherwise, it will listen on this port for HTTP
+# CONNECT requests. By default, this value is 0.
+# tcpmux_httpconnect_port = 1337
+
 # set dashboard_addr and dashboard_port to view dashboard of frps
 # dashboard_addr's default value is same with bind_addr
 # dashboard is available only if dashboard_port is set
@@ -33,6 +39,9 @@ dashboard_port = 7500
 dashboard_user = admin
 dashboard_pwd = admin
 
+# enable_prometheus will export prometheus metrics on {dashboard_addr}:{dashboard_port} in /metrics api.
+enable_prometheus = true
+
 # dashboard assets directory(only for debug mode)
 # assets_dir = ./static
 # console or real logFile path like ./frps.log
@@ -46,9 +55,38 @@ log_max_days = 3
 # disable log colors when log_file is console, default is false
 disable_log_color = false
 
+# DetailedErrorsToClient defines whether to send the specific error (with debug info) to frpc. By default, this value is true.
+detailed_errors_to_client = true
+
+# AuthenticationMethod specifies which authentication method to use to authenticate frpc with frps.
+# If "token" is specified - token will be read into login message.
+# If "oidc" is specified - OIDC (Open ID Connect) token will be issued using OIDC settings. By default, this value is "token".
+authentication_method = token
+
+# AuthenticateHeartBeats specifies whether to include authentication token in heartbeats sent to frps. By default, this value is false.
+authenticate_heartbeats = false
+
+# AuthenticateNewWorkConns specifies whether to include authentication token in new work connections sent to frps. By default, this value is false.
+authenticate_new_work_conns = false
+
 # auth token
 token = 12345678
 
+# OidcClientId specifies the client ID to use to get a token in OIDC authentication if AuthenticationMethod == "oidc".
+# By default, this value is "".
+oidc_client_id =
+
+# OidcClientSecret specifies the client secret to use to get a token in OIDC authentication if AuthenticationMethod == "oidc".
+# By default, this value is "".
+oidc_client_secret = 
+
+# OidcAudience specifies the audience of the token in OIDC authentication if AuthenticationMethod == "oidc". By default, this value is "".
+oidc_audience = 
+
+# OidcTokenEndpointUrl specifies the URL which implements OIDC Token Endpoint.
+# It will be used to get an OIDC token if AuthenticationMethod == "oidc". By default, this value is "".
+oidc_token_endpoint_url = 
+
 # heartbeat configure, it's not recommended to modify the default value
 # the default value of heartbeat_timeout is 90
 # heartbeat_timeout = 90
@@ -62,6 +100,9 @@ max_pool_count = 5
 # max ports can be used for each client, default value is 0 means no limit
 max_ports_per_client = 0
 
+# TlsOnly specifies whether to only accept TLS-encrypted connections. By default, the value is false.
+tls_only = false
+
 # if subdomain_host is not empty, you can set subdomain when type is http or https in frpc's configure file
 # when subdomain is test, the host used by routing is test.frps.com
 subdomain_host = frps.com

+ 6 - 2
go.mod

@@ -4,6 +4,7 @@ go 1.12
 
 
 require (
 require (
 	github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
 	github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
+	github.com/coreos/go-oidc v2.2.1+incompatible
 	github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb
 	github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb
 	github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049
 	github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049
 	github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible
 	github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible
@@ -16,18 +17,21 @@ require (
 	github.com/klauspost/reedsolomon v1.9.1 // indirect
 	github.com/klauspost/reedsolomon v1.9.1 // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/mattn/go-runewidth v0.0.4 // indirect
 	github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc
 	github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc
-	github.com/pkg/errors v0.8.0 // indirect
+	github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
+	github.com/prometheus/client_golang v1.4.1
 	github.com/rakyll/statik v0.1.1
 	github.com/rakyll/statik v0.1.1
 	github.com/rodaine/table v1.0.0
 	github.com/rodaine/table v1.0.0
 	github.com/spf13/cobra v0.0.3
 	github.com/spf13/cobra v0.0.3
 	github.com/spf13/pflag v1.0.1 // indirect
 	github.com/spf13/pflag v1.0.1 // indirect
-	github.com/stretchr/testify v1.3.0
+	github.com/stretchr/testify v1.4.0
 	github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 // indirect
 	github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 // indirect
 	github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 // indirect
 	github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 // indirect
 	github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 // indirect
 	github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 // indirect
 	github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec
 	github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec
 	github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae // indirect
 	github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae // indirect
 	golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
 	golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 	golang.org/x/text v0.3.2 // indirect
 	golang.org/x/text v0.3.2 // indirect
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
+	gopkg.in/square/go-jose.v2 v2.4.1 // indirect
 )
 )

+ 95 - 5
go.sum

@@ -1,51 +1,141 @@
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
+github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk=
 github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk=
 github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 h1:teH578mf2ii42NHhIp3PhgvjU5bv+NFMq9fSQR8NaG8=
 github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 h1:teH578mf2ii42NHhIp3PhgvjU5bv+NFMq9fSQR8NaG8=
 github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049/go.mod h1:DqIrnl0rp3Zybg9zbJmozTy1n8fYJoX+QoAj9slIkKM=
 github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049/go.mod h1:DqIrnl0rp3Zybg9zbJmozTy1n8fYJoX+QoAj9slIkKM=
 github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible h1:ssXat9YXFvigNge/IkkZvFMn8yeYKFX+uI6wn2mLJ74=
 github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible h1:ssXat9YXFvigNge/IkkZvFMn8yeYKFX+uI6wn2mLJ74=
 github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible/go.mod h1:YpCOaxj7vvMThhIQ9AfTOPW2sfztQR5WDfs7AflSy4s=
 github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible/go.mod h1:YpCOaxj7vvMThhIQ9AfTOPW2sfztQR5WDfs7AflSy4s=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
 github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
 github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/reedsolomon v1.9.1 h1:kYrT1MlR4JH6PqOpC+okdb9CDTcwEC/BqpzK4WFyXL8=
 github.com/klauspost/reedsolomon v1.9.1 h1:kYrT1MlR4JH6PqOpC+okdb9CDTcwEC/BqpzK4WFyXL8=
 github.com/klauspost/reedsolomon v1.9.1/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
 github.com/klauspost/reedsolomon v1.9.1/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
-github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc h1:lNOt1SMsgHXTdpuGw+RpnJtzUcCb/oRKZP65pBy9pr8=
 github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc h1:lNOt1SMsgHXTdpuGw+RpnJtzUcCb/oRKZP65pBy9pr8=
 github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY=
 github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
+github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/rakyll/statik v0.1.1/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
 github.com/rakyll/statik v0.1.1/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
 github.com/rodaine/table v1.0.0/go.mod h1:YAUzwPOji0DUJNEvggdxyQcUAl4g3hDRcFlyjnnR51I=
 github.com/rodaine/table v1.0.0/go.mod h1:YAUzwPOji0DUJNEvggdxyQcUAl4g3hDRcFlyjnnR51I=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
 github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
 github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
 github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
 github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc=
 github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc=
 github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
 github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
-github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
 github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
 github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y=
+gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

+ 151 - 0
models/auth/auth.go

@@ -0,0 +1,151 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"fmt"
+
+	"github.com/fatedier/frp/models/consts"
+	"github.com/fatedier/frp/models/msg"
+
+	"github.com/vaughan0/go-ini"
+)
+
+type baseConfig struct {
+	// AuthenticationMethod specifies what authentication method to use to
+	// authenticate frpc with frps. If "token" is specified - token will be
+	// read into login message. If "oidc" is specified - OIDC (Open ID Connect)
+	// token will be issued using OIDC settings. By default, this value is "token".
+	AuthenticationMethod string `json:"authentication_method"`
+	// AuthenticateHeartBeats specifies whether to include authentication token in
+	// heartbeats sent to frps. By default, this value is false.
+	AuthenticateHeartBeats bool `json:"authenticate_heartbeats"`
+	// AuthenticateNewWorkConns specifies whether to include authentication token in
+	// new work connections sent to frps. By default, this value is false.
+	AuthenticateNewWorkConns bool `json:"authenticate_new_work_conns"`
+}
+
+func getDefaultBaseConf() baseConfig {
+	return baseConfig{
+		AuthenticationMethod:     "token",
+		AuthenticateHeartBeats:   false,
+		AuthenticateNewWorkConns: false,
+	}
+}
+
+func unmarshalBaseConfFromIni(conf ini.File) baseConfig {
+	var (
+		tmpStr string
+		ok     bool
+	)
+
+	cfg := getDefaultBaseConf()
+
+	if tmpStr, ok = conf.Get("common", "authentication_method"); ok {
+		cfg.AuthenticationMethod = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "authenticate_heartbeats"); ok && tmpStr == "true" {
+		cfg.AuthenticateHeartBeats = true
+	} else {
+		cfg.AuthenticateHeartBeats = false
+	}
+
+	if tmpStr, ok = conf.Get("common", "authenticate_new_work_conns"); ok && tmpStr == "true" {
+		cfg.AuthenticateNewWorkConns = true
+	} else {
+		cfg.AuthenticateNewWorkConns = false
+	}
+
+	return cfg
+}
+
+type AuthClientConfig struct {
+	baseConfig
+	oidcClientConfig
+	tokenConfig
+}
+
+func GetDefaultAuthClientConf() AuthClientConfig {
+	return AuthClientConfig{
+		baseConfig:       getDefaultBaseConf(),
+		oidcClientConfig: getDefaultOidcClientConf(),
+		tokenConfig:      getDefaultTokenConf(),
+	}
+}
+
+func UnmarshalAuthClientConfFromIni(conf ini.File) (cfg AuthClientConfig) {
+	cfg.baseConfig = unmarshalBaseConfFromIni(conf)
+	cfg.oidcClientConfig = unmarshalOidcClientConfFromIni(conf)
+	cfg.tokenConfig = unmarshalTokenConfFromIni(conf)
+	return cfg
+}
+
+type AuthServerConfig struct {
+	baseConfig
+	oidcServerConfig
+	tokenConfig
+}
+
+func GetDefaultAuthServerConf() AuthServerConfig {
+	return AuthServerConfig{
+		baseConfig:       getDefaultBaseConf(),
+		oidcServerConfig: getDefaultOidcServerConf(),
+		tokenConfig:      getDefaultTokenConf(),
+	}
+}
+
+func UnmarshalAuthServerConfFromIni(conf ini.File) (cfg AuthServerConfig) {
+	cfg.baseConfig = unmarshalBaseConfFromIni(conf)
+	cfg.oidcServerConfig = unmarshalOidcServerConfFromIni(conf)
+	cfg.tokenConfig = unmarshalTokenConfFromIni(conf)
+	return cfg
+}
+
+type Setter interface {
+	SetLogin(*msg.Login) error
+	SetPing(*msg.Ping) error
+	SetNewWorkConn(*msg.NewWorkConn) error
+}
+
+func NewAuthSetter(cfg AuthClientConfig) (authProvider Setter) {
+	switch cfg.AuthenticationMethod {
+	case consts.TokenAuthMethod:
+		authProvider = NewTokenAuth(cfg.baseConfig, cfg.tokenConfig)
+	case consts.OidcAuthMethod:
+		authProvider = NewOidcAuthSetter(cfg.baseConfig, cfg.oidcClientConfig)
+	default:
+		panic(fmt.Sprintf("wrong authentication method: '%s'", cfg.AuthenticationMethod))
+	}
+
+	return authProvider
+}
+
+type Verifier interface {
+	VerifyLogin(*msg.Login) error
+	VerifyPing(*msg.Ping) error
+	VerifyNewWorkConn(*msg.NewWorkConn) error
+}
+
+func NewAuthVerifier(cfg AuthServerConfig) (authVerifier Verifier) {
+	switch cfg.AuthenticationMethod {
+	case consts.TokenAuthMethod:
+		authVerifier = NewTokenAuth(cfg.baseConfig, cfg.tokenConfig)
+	case consts.OidcAuthMethod:
+		authVerifier = NewOidcAuthVerifier(cfg.baseConfig, cfg.oidcServerConfig)
+	}
+
+	return authVerifier
+}

+ 255 - 0
models/auth/oidc.go

@@ -0,0 +1,255 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/fatedier/frp/models/msg"
+
+	"github.com/coreos/go-oidc"
+	"github.com/vaughan0/go-ini"
+	"golang.org/x/oauth2/clientcredentials"
+)
+
+type oidcClientConfig struct {
+	// OidcClientId specifies the client ID to use to get a token in OIDC
+	// authentication if AuthenticationMethod == "oidc". By default, this value
+	// is "".
+	OidcClientId string `json:"oidc_client_id"`
+	// OidcClientSecret specifies the client secret to use to get a token in OIDC
+	// authentication if AuthenticationMethod == "oidc". By default, this value
+	// is "".
+	OidcClientSecret string `json:"oidc_client_secret"`
+	// OidcAudience specifies the audience of the token in OIDC authentication
+	//if AuthenticationMethod == "oidc". By default, this value is "".
+	OidcAudience string `json:"oidc_audience"`
+	// OidcTokenEndpointUrl specifies the URL which implements OIDC Token Endpoint.
+	// It will be used to get an OIDC token if AuthenticationMethod == "oidc".
+	// By default, this value is "".
+	OidcTokenEndpointUrl string `json:"oidc_token_endpoint_url"`
+}
+
+func getDefaultOidcClientConf() oidcClientConfig {
+	return oidcClientConfig{
+		OidcClientId:         "",
+		OidcClientSecret:     "",
+		OidcAudience:         "",
+		OidcTokenEndpointUrl: "",
+	}
+}
+
+func unmarshalOidcClientConfFromIni(conf ini.File) oidcClientConfig {
+	var (
+		tmpStr string
+		ok     bool
+	)
+
+	cfg := getDefaultOidcClientConf()
+
+	if tmpStr, ok = conf.Get("common", "oidc_client_id"); ok {
+		cfg.OidcClientId = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_client_secret"); ok {
+		cfg.OidcClientSecret = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_audience"); ok {
+		cfg.OidcAudience = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_token_endpoint_url"); ok {
+		cfg.OidcTokenEndpointUrl = tmpStr
+	}
+
+	return cfg
+}
+
+type oidcServerConfig struct {
+	// OidcIssuer specifies the issuer to verify OIDC tokens with. This issuer
+	// will be used to load public keys to verify signature and will be compared
+	// with the issuer claim in the OIDC token. It will be used if
+	// AuthenticationMethod == "oidc". By default, this value is "".
+	OidcIssuer string `json:"oidc_issuer"`
+	// OidcAudience specifies the audience OIDC tokens should contain when validated.
+	// If this value is empty, audience ("client ID") verification will be skipped.
+	// It will be used when AuthenticationMethod == "oidc". By default, this
+	// value is "".
+	OidcAudience string `json:"oidc_audience"`
+	// OidcSkipExpiryCheck specifies whether to skip checking if the OIDC token is
+	// expired. It will be used when AuthenticationMethod == "oidc". By default, this
+	// value is false.
+	OidcSkipExpiryCheck bool `json:"oidc_skip_expiry_check"`
+	// OidcSkipIssuerCheck specifies whether to skip checking if the OIDC token's
+	// issuer claim matches the issuer specified in OidcIssuer. It will be used when
+	// AuthenticationMethod == "oidc". By default, this value is false.
+	OidcSkipIssuerCheck bool `json:"oidc_skip_issuer_check"`
+}
+
+func getDefaultOidcServerConf() oidcServerConfig {
+	return oidcServerConfig{
+		OidcIssuer:          "",
+		OidcAudience:        "",
+		OidcSkipExpiryCheck: false,
+		OidcSkipIssuerCheck: false,
+	}
+}
+
+func unmarshalOidcServerConfFromIni(conf ini.File) oidcServerConfig {
+	var (
+		tmpStr string
+		ok     bool
+	)
+
+	cfg := getDefaultOidcServerConf()
+
+	if tmpStr, ok = conf.Get("common", "oidc_issuer"); ok {
+		cfg.OidcIssuer = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_audience"); ok {
+		cfg.OidcAudience = tmpStr
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_skip_expiry_check"); ok && tmpStr == "true" {
+		cfg.OidcSkipExpiryCheck = true
+	} else {
+		cfg.OidcSkipExpiryCheck = false
+	}
+
+	if tmpStr, ok = conf.Get("common", "oidc_skip_issuer_check"); ok && tmpStr == "true" {
+		cfg.OidcSkipIssuerCheck = true
+	} else {
+		cfg.OidcSkipIssuerCheck = false
+	}
+
+	return cfg
+}
+
+type OidcAuthProvider struct {
+	baseConfig
+
+	tokenGenerator *clientcredentials.Config
+}
+
+func NewOidcAuthSetter(baseCfg baseConfig, cfg oidcClientConfig) *OidcAuthProvider {
+	tokenGenerator := &clientcredentials.Config{
+		ClientID:     cfg.OidcClientId,
+		ClientSecret: cfg.OidcClientSecret,
+		Scopes:       []string{cfg.OidcAudience},
+		TokenURL:     cfg.OidcTokenEndpointUrl,
+	}
+
+	return &OidcAuthProvider{
+		baseConfig:     baseCfg,
+		tokenGenerator: tokenGenerator,
+	}
+}
+
+func (auth *OidcAuthProvider) generateAccessToken() (accessToken string, err error) {
+	tokenObj, err := auth.tokenGenerator.Token(context.Background())
+	if err != nil {
+		return "", fmt.Errorf("couldn't generate OIDC token for login: %v", err)
+	}
+	return tokenObj.AccessToken, nil
+}
+
+func (auth *OidcAuthProvider) SetLogin(loginMsg *msg.Login) (err error) {
+	loginMsg.PrivilegeKey, err = auth.generateAccessToken()
+	return err
+}
+
+func (auth *OidcAuthProvider) SetPing(pingMsg *msg.Ping) (err error) {
+	if !auth.AuthenticateHeartBeats {
+		return nil
+	}
+
+	pingMsg.PrivilegeKey, err = auth.generateAccessToken()
+	return err
+}
+
+func (auth *OidcAuthProvider) SetNewWorkConn(newWorkConnMsg *msg.NewWorkConn) (err error) {
+	if !auth.AuthenticateNewWorkConns {
+		return nil
+	}
+
+	newWorkConnMsg.PrivilegeKey, err = auth.generateAccessToken()
+	return err
+}
+
+type OidcAuthConsumer struct {
+	baseConfig
+
+	verifier         *oidc.IDTokenVerifier
+	subjectFromLogin string
+}
+
+func NewOidcAuthVerifier(baseCfg baseConfig, cfg oidcServerConfig) *OidcAuthConsumer {
+	provider, err := oidc.NewProvider(context.Background(), cfg.OidcIssuer)
+	if err != nil {
+		panic(err)
+	}
+	verifierConf := oidc.Config{
+		ClientID:          cfg.OidcAudience,
+		SkipClientIDCheck: cfg.OidcAudience == "",
+		SkipExpiryCheck:   cfg.OidcSkipExpiryCheck,
+		SkipIssuerCheck:   cfg.OidcSkipIssuerCheck,
+	}
+	return &OidcAuthConsumer{
+		baseConfig: baseCfg,
+		verifier:   provider.Verifier(&verifierConf),
+	}
+}
+
+func (auth *OidcAuthConsumer) VerifyLogin(loginMsg *msg.Login) (err error) {
+	token, err := auth.verifier.Verify(context.Background(), loginMsg.PrivilegeKey)
+	if err != nil {
+		return fmt.Errorf("invalid OIDC token in login: %v", err)
+	}
+	auth.subjectFromLogin = token.Subject
+	return nil
+}
+
+func (auth *OidcAuthConsumer) verifyPostLoginToken(privilegeKey string) (err error) {
+	token, err := auth.verifier.Verify(context.Background(), privilegeKey)
+	if err != nil {
+		return fmt.Errorf("invalid OIDC token in ping: %v", err)
+	}
+	if token.Subject != auth.subjectFromLogin {
+		return fmt.Errorf("received different OIDC subject in login and ping. "+
+			"original subject: %s, "+
+			"new subject: %s",
+			auth.subjectFromLogin, token.Subject)
+	}
+	return nil
+}
+
+func (auth *OidcAuthConsumer) VerifyPing(pingMsg *msg.Ping) (err error) {
+	if !auth.AuthenticateHeartBeats {
+		return nil
+	}
+
+	return auth.verifyPostLoginToken(pingMsg.PrivilegeKey)
+}
+
+func (auth *OidcAuthConsumer) VerifyNewWorkConn(newWorkConnMsg *msg.NewWorkConn) (err error) {
+	if !auth.AuthenticateNewWorkConns {
+		return nil
+	}
+
+	return auth.verifyPostLoginToken(newWorkConnMsg.PrivilegeKey)
+}

+ 120 - 0
models/auth/token.go

@@ -0,0 +1,120 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/fatedier/frp/models/msg"
+	"github.com/fatedier/frp/utils/util"
+
+	"github.com/vaughan0/go-ini"
+)
+
+type tokenConfig struct {
+	// Token specifies the authorization token used to create keys to be sent
+	// to the server. The server must have a matching token for authorization
+	// to succeed.  By default, this value is "".
+	Token string `json:"token"`
+}
+
+func getDefaultTokenConf() tokenConfig {
+	return tokenConfig{
+		Token: "",
+	}
+}
+
+func unmarshalTokenConfFromIni(conf ini.File) tokenConfig {
+	var (
+		tmpStr string
+		ok     bool
+	)
+
+	cfg := getDefaultTokenConf()
+
+	if tmpStr, ok = conf.Get("common", "token"); ok {
+		cfg.Token = tmpStr
+	}
+
+	return cfg
+}
+
+type TokenAuthSetterVerifier struct {
+	baseConfig
+
+	token string
+}
+
+func NewTokenAuth(baseCfg baseConfig, cfg tokenConfig) *TokenAuthSetterVerifier {
+	return &TokenAuthSetterVerifier{
+		baseConfig: baseCfg,
+		token:      cfg.Token,
+	}
+}
+
+func (auth *TokenAuthSetterVerifier) SetLogin(loginMsg *msg.Login) (err error) {
+	loginMsg.PrivilegeKey = util.GetAuthKey(auth.token, loginMsg.Timestamp)
+	return nil
+}
+
+func (auth *TokenAuthSetterVerifier) SetPing(pingMsg *msg.Ping) error {
+	if !auth.AuthenticateHeartBeats {
+		return nil
+	}
+
+	pingMsg.Timestamp = time.Now().Unix()
+	pingMsg.PrivilegeKey = util.GetAuthKey(auth.token, pingMsg.Timestamp)
+	return nil
+}
+
+func (auth *TokenAuthSetterVerifier) SetNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {
+	if !auth.AuthenticateHeartBeats {
+		return nil
+	}
+
+	newWorkConnMsg.Timestamp = time.Now().Unix()
+	newWorkConnMsg.PrivilegeKey = util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp)
+	return nil
+}
+
+func (auth *TokenAuthSetterVerifier) VerifyLogin(loginMsg *msg.Login) error {
+	if util.GetAuthKey(auth.token, loginMsg.Timestamp) != loginMsg.PrivilegeKey {
+		return fmt.Errorf("token in login doesn't match token from configuration")
+	}
+	return nil
+}
+
+func (auth *TokenAuthSetterVerifier) VerifyPing(pingMsg *msg.Ping) error {
+	if !auth.AuthenticateHeartBeats {
+		return nil
+	}
+
+	if util.GetAuthKey(auth.token, pingMsg.Timestamp) != pingMsg.PrivilegeKey {
+		return fmt.Errorf("token in heartbeat doesn't match token from configuration")
+	}
+	return nil
+}
+
+func (auth *TokenAuthSetterVerifier) VerifyNewWorkConn(newWorkConnMsg *msg.NewWorkConn) error {
+	if !auth.AuthenticateNewWorkConns {
+		return nil
+	}
+
+	if util.GetAuthKey(auth.token, newWorkConnMsg.Timestamp) != newWorkConnMsg.PrivilegeKey {
+		return fmt.Errorf("token in NewWorkConn doesn't match token from configuration")
+	}
+	return nil
+}

+ 5 - 9
models/config/client_common.go

@@ -21,12 +21,15 @@ import (
 	"strings"
 	"strings"
 
 
 	ini "github.com/vaughan0/go-ini"
 	ini "github.com/vaughan0/go-ini"
+
+	"github.com/fatedier/frp/models/auth"
 )
 )
 
 
 // ClientCommonConf contains information for a client service. It is
 // ClientCommonConf contains information for a client service. It is
 // recommended to use GetDefaultClientConf instead of creating this object
 // recommended to use GetDefaultClientConf instead of creating this object
 // directly, so that all unspecified fields have reasonable default values.
 // directly, so that all unspecified fields have reasonable default values.
 type ClientCommonConf struct {
 type ClientCommonConf struct {
+	auth.AuthClientConfig
 	// ServerAddr specifies the address of the server to connect to. By
 	// ServerAddr specifies the address of the server to connect to. By
 	// default, this value is "0.0.0.0".
 	// default, this value is "0.0.0.0".
 	ServerAddr string `json:"server_addr"`
 	ServerAddr string `json:"server_addr"`
@@ -56,10 +59,6 @@ type ClientCommonConf struct {
 	// DisableLogColor disables log colors when LogWay == "console" when set to
 	// DisableLogColor disables log colors when LogWay == "console" when set to
 	// true. By default, this value is false.
 	// true. By default, this value is false.
 	DisableLogColor bool `json:"disable_log_color"`
 	DisableLogColor bool `json:"disable_log_color"`
-	// Token specifies the authorization token used to create keys to be sent
-	// to the server. The server must have a matching token for authorization
-	// to succeed.  By default, this value is "".
-	Token string `json:"token"`
 	// AdminAddr specifies the address that the admin server binds to. By
 	// AdminAddr specifies the address that the admin server binds to. By
 	// default, this value is "127.0.0.1".
 	// default, this value is "127.0.0.1".
 	AdminAddr string `json:"admin_addr"`
 	AdminAddr string `json:"admin_addr"`
@@ -130,7 +129,6 @@ func GetDefaultClientConf() ClientCommonConf {
 		LogLevel:          "info",
 		LogLevel:          "info",
 		LogMaxDays:        3,
 		LogMaxDays:        3,
 		DisableLogColor:   false,
 		DisableLogColor:   false,
-		Token:             "",
 		AdminAddr:         "127.0.0.1",
 		AdminAddr:         "127.0.0.1",
 		AdminPort:         0,
 		AdminPort:         0,
 		AdminUser:         "",
 		AdminUser:         "",
@@ -158,6 +156,8 @@ func UnmarshalClientConfFromIni(content string) (cfg ClientCommonConf, err error
 		return ClientCommonConf{}, fmt.Errorf("parse ini conf file error: %v", err)
 		return ClientCommonConf{}, fmt.Errorf("parse ini conf file error: %v", err)
 	}
 	}
 
 
+	cfg.AuthClientConfig = auth.UnmarshalAuthClientConfFromIni(conf)
+
 	var (
 	var (
 		tmpStr string
 		tmpStr string
 		ok     bool
 		ok     bool
@@ -203,10 +203,6 @@ func UnmarshalClientConfFromIni(content string) (cfg ClientCommonConf, err error
 		}
 		}
 	}
 	}
 
 
-	if tmpStr, ok = conf.Get("common", "token"); ok {
-		cfg.Token = tmpStr
-	}
-
 	if tmpStr, ok = conf.Get("common", "admin_addr"); ok {
 	if tmpStr, ok = conf.Get("common", "admin_addr"); ok {
 		cfg.AdminAddr = tmpStr
 		cfg.AdminAddr = tmpStr
 	}
 	}

+ 80 - 1
models/config/proxy.go

@@ -34,6 +34,7 @@ var (
 func init() {
 func init() {
 	proxyConfTypeMap = make(map[string]reflect.Type)
 	proxyConfTypeMap = make(map[string]reflect.Type)
 	proxyConfTypeMap[consts.TcpProxy] = reflect.TypeOf(TcpProxyConf{})
 	proxyConfTypeMap[consts.TcpProxy] = reflect.TypeOf(TcpProxyConf{})
+	proxyConfTypeMap[consts.TcpMuxProxy] = reflect.TypeOf(TcpMuxProxyConf{})
 	proxyConfTypeMap[consts.UdpProxy] = reflect.TypeOf(UdpProxyConf{})
 	proxyConfTypeMap[consts.UdpProxy] = reflect.TypeOf(UdpProxyConf{})
 	proxyConfTypeMap[consts.HttpProxy] = reflect.TypeOf(HttpProxyConf{})
 	proxyConfTypeMap[consts.HttpProxy] = reflect.TypeOf(HttpProxyConf{})
 	proxyConfTypeMap[consts.HttpsProxy] = reflect.TypeOf(HttpsProxyConf{})
 	proxyConfTypeMap[consts.HttpsProxy] = reflect.TypeOf(HttpsProxyConf{})
@@ -149,7 +150,7 @@ func (cfg *BaseProxyConf) compare(cmp *BaseProxyConf) bool {
 		cfg.Group != cmp.Group ||
 		cfg.Group != cmp.Group ||
 		cfg.GroupKey != cmp.GroupKey ||
 		cfg.GroupKey != cmp.GroupKey ||
 		cfg.ProxyProtocolVersion != cmp.ProxyProtocolVersion ||
 		cfg.ProxyProtocolVersion != cmp.ProxyProtocolVersion ||
-		cfg.BandwidthLimit.Equal(&cmp.BandwidthLimit) ||
+		!cfg.BandwidthLimit.Equal(&cmp.BandwidthLimit) ||
 		!reflect.DeepEqual(cfg.Metas, cmp.Metas) {
 		!reflect.DeepEqual(cfg.Metas, cmp.Metas) {
 		return false
 		return false
 	}
 	}
@@ -574,6 +575,84 @@ func (cfg *TcpProxyConf) CheckForCli() (err error) {
 
 
 func (cfg *TcpProxyConf) CheckForSvr(serverCfg ServerCommonConf) error { return nil }
 func (cfg *TcpProxyConf) CheckForSvr(serverCfg ServerCommonConf) error { return nil }
 
 
+// TCP Multiplexer
+type TcpMuxProxyConf struct {
+	BaseProxyConf
+	DomainConf
+
+	Multiplexer string `json:"multiplexer"`
+}
+
+func (cfg *TcpMuxProxyConf) Compare(cmp ProxyConf) bool {
+	cmpConf, ok := cmp.(*TcpMuxProxyConf)
+	if !ok {
+		return false
+	}
+
+	if !cfg.BaseProxyConf.compare(&cmpConf.BaseProxyConf) ||
+		!cfg.DomainConf.compare(&cmpConf.DomainConf) ||
+		cfg.Multiplexer != cmpConf.Multiplexer {
+		return false
+	}
+	return true
+}
+
+func (cfg *TcpMuxProxyConf) UnmarshalFromMsg(pMsg *msg.NewProxy) {
+	cfg.BaseProxyConf.UnmarshalFromMsg(pMsg)
+	cfg.DomainConf.UnmarshalFromMsg(pMsg)
+	cfg.Multiplexer = pMsg.Multiplexer
+}
+
+func (cfg *TcpMuxProxyConf) UnmarshalFromIni(prefix string, name string, section ini.Section) (err error) {
+	if err = cfg.BaseProxyConf.UnmarshalFromIni(prefix, name, section); err != nil {
+		return
+	}
+	if err = cfg.DomainConf.UnmarshalFromIni(prefix, name, section); err != nil {
+		return
+	}
+
+	cfg.Multiplexer = section["multiplexer"]
+	if cfg.Multiplexer != consts.HttpConnectTcpMultiplexer {
+		return fmt.Errorf("parse conf error: proxy [%s] incorrect multiplexer [%s]", name, cfg.Multiplexer)
+	}
+	return
+}
+
+func (cfg *TcpMuxProxyConf) MarshalToMsg(pMsg *msg.NewProxy) {
+	cfg.BaseProxyConf.MarshalToMsg(pMsg)
+	cfg.DomainConf.MarshalToMsg(pMsg)
+	pMsg.Multiplexer = cfg.Multiplexer
+}
+
+func (cfg *TcpMuxProxyConf) CheckForCli() (err error) {
+	if err = cfg.BaseProxyConf.checkForCli(); err != nil {
+		return err
+	}
+	if err = cfg.DomainConf.checkForCli(); err != nil {
+		return err
+	}
+	if cfg.Multiplexer != consts.HttpConnectTcpMultiplexer {
+		return fmt.Errorf("parse conf error: incorrect multiplexer [%s]", cfg.Multiplexer)
+	}
+	return
+}
+
+func (cfg *TcpMuxProxyConf) CheckForSvr(serverCfg ServerCommonConf) (err error) {
+	if cfg.Multiplexer != consts.HttpConnectTcpMultiplexer {
+		return fmt.Errorf("proxy [%s] incorrect multiplexer [%s]", cfg.ProxyName, cfg.Multiplexer)
+	}
+
+	if cfg.Multiplexer == consts.HttpConnectTcpMultiplexer && serverCfg.TcpMuxHttpConnectPort == 0 {
+		return fmt.Errorf("proxy [%s] type [tcpmux] with multiplexer [httpconnect] requires tcpmux_httpconnect_port configuration", cfg.ProxyName)
+	}
+
+	if err = cfg.DomainConf.checkForSvr(serverCfg); err != nil {
+		err = fmt.Errorf("proxy [%s] domain conf check error: %v", cfg.ProxyName, err)
+		return
+	}
+	return
+}
+
 // UDP
 // UDP
 type UdpProxyConf struct {
 type UdpProxyConf struct {
 	BaseProxyConf
 	BaseProxyConf

+ 76 - 38
models/config/server_common.go

@@ -21,6 +21,7 @@ import (
 
 
 	ini "github.com/vaughan0/go-ini"
 	ini "github.com/vaughan0/go-ini"
 
 
+	"github.com/fatedier/frp/models/auth"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/util"
 )
 )
@@ -29,6 +30,7 @@ import (
 // recommended to use GetDefaultServerConf instead of creating this object
 // recommended to use GetDefaultServerConf instead of creating this object
 // directly, so that all unspecified fields have reasonable default values.
 // directly, so that all unspecified fields have reasonable default values.
 type ServerCommonConf struct {
 type ServerCommonConf struct {
+	auth.AuthServerConfig
 	// BindAddr specifies the address that the server binds to. By default,
 	// BindAddr specifies the address that the server binds to. By default,
 	// this value is "0.0.0.0".
 	// this value is "0.0.0.0".
 	BindAddr string `json:"bind_addr"`
 	BindAddr string `json:"bind_addr"`
@@ -46,25 +48,25 @@ type ServerCommonConf struct {
 	// ProxyBindAddr specifies the address that the proxy binds to. This value
 	// ProxyBindAddr specifies the address that the proxy binds to. This value
 	// may be the same as BindAddr. By default, this value is "0.0.0.0".
 	// may be the same as BindAddr. By default, this value is "0.0.0.0".
 	ProxyBindAddr string `json:"proxy_bind_addr"`
 	ProxyBindAddr string `json:"proxy_bind_addr"`
-
 	// VhostHttpPort specifies the port that the server listens for HTTP Vhost
 	// VhostHttpPort specifies the port that the server listens for HTTP Vhost
 	// requests. If this value is 0, the server will not listen for HTTP
 	// requests. If this value is 0, the server will not listen for HTTP
 	// requests. By default, this value is 0.
 	// requests. By default, this value is 0.
 	VhostHttpPort int `json:"vhost_http_port"`
 	VhostHttpPort int `json:"vhost_http_port"`
-
 	// VhostHttpsPort specifies the port that the server listens for HTTPS
 	// VhostHttpsPort specifies the port that the server listens for HTTPS
 	// Vhost requests. If this value is 0, the server will not listen for HTTPS
 	// Vhost requests. If this value is 0, the server will not listen for HTTPS
 	// requests. By default, this value is 0.
 	// requests. By default, this value is 0.
 	VhostHttpsPort int `json:"vhost_https_port"`
 	VhostHttpsPort int `json:"vhost_https_port"`
-
+	// TcpMuxHttpConnectPort specifies the port that the server listens for TCP
+	// HTTP CONNECT requests. If the value is 0, the server will not multiplex TCP
+	// requests on one single port. If it's not - it will listen on this value for
+	// HTTP CONNECT requests. By default, this value is 0.
+	TcpMuxHttpConnectPort int `json:"tcpmux_httpconnect_port"`
 	// VhostHttpTimeout specifies the response header timeout for the Vhost
 	// VhostHttpTimeout specifies the response header timeout for the Vhost
 	// HTTP server, in seconds. By default, this value is 60.
 	// HTTP server, in seconds. By default, this value is 60.
 	VhostHttpTimeout int64 `json:"vhost_http_timeout"`
 	VhostHttpTimeout int64 `json:"vhost_http_timeout"`
-
 	// DashboardAddr specifies the address that the dashboard binds to. By
 	// DashboardAddr specifies the address that the dashboard binds to. By
 	// default, this value is "0.0.0.0".
 	// default, this value is "0.0.0.0".
 	DashboardAddr string `json:"dashboard_addr"`
 	DashboardAddr string `json:"dashboard_addr"`
-
 	// DashboardPort specifies the port that the dashboard listens on. If this
 	// DashboardPort specifies the port that the dashboard listens on. If this
 	// value is 0, the dashboard will not be started. By default, this value is
 	// value is 0, the dashboard will not be started. By default, this value is
 	// 0.
 	// 0.
@@ -75,6 +77,9 @@ type ServerCommonConf struct {
 	// DashboardUser specifies the password that the dashboard will use for
 	// DashboardUser specifies the password that the dashboard will use for
 	// login. By default, this value is "admin".
 	// login. By default, this value is "admin".
 	DashboardPwd string `json:"dashboard_pwd"`
 	DashboardPwd string `json:"dashboard_pwd"`
+	// EnablePrometheus will export prometheus metrics on {dashboard_addr}:{dashboard_port}
+	// in /metrics api.
+	EnablePrometheus bool `json:"enable_prometheus"`
 	// AssetsDir specifies the local directory that the dashboard will load
 	// AssetsDir specifies the local directory that the dashboard will load
 	// resources from. If this value is "", assets will be loaded from the
 	// resources from. If this value is "", assets will be loaded from the
 	// bundled executable using statik. By default, this value is "".
 	// bundled executable using statik. By default, this value is "".
@@ -98,10 +103,10 @@ type ServerCommonConf struct {
 	// DisableLogColor disables log colors when LogWay == "console" when set to
 	// DisableLogColor disables log colors when LogWay == "console" when set to
 	// true. By default, this value is false.
 	// true. By default, this value is false.
 	DisableLogColor bool `json:"disable_log_color"`
 	DisableLogColor bool `json:"disable_log_color"`
-	// Token specifies the authorization token used to authenticate keys
-	// received from clients. Clients must have a matching token to be
-	// authorized to use the server. By default, this value is "".
-	Token string `json:"token"`
+	// DetailedErrorsToClient defines whether to send the specific error (with
+	// debug info) to frpc. By default, this value is true.
+	DetailedErrorsToClient bool `json:"detailed_errors_to_client"`
+
 	// SubDomainHost specifies the domain that will be attached to sub-domains
 	// SubDomainHost specifies the domain that will be attached to sub-domains
 	// requested by the client when using Vhost proxying. For example, if this
 	// requested by the client when using Vhost proxying. For example, if this
 	// value is set to "frps.com" and the client requested the subdomain
 	// value is set to "frps.com" and the client requested the subdomain
@@ -128,6 +133,9 @@ type ServerCommonConf struct {
 	// may proxy to. If this value is 0, no limit will be applied. By default,
 	// may proxy to. If this value is 0, no limit will be applied. By default,
 	// this value is 0.
 	// this value is 0.
 	MaxPortsPerClient int64 `json:"max_ports_per_client"`
 	MaxPortsPerClient int64 `json:"max_ports_per_client"`
+	// TlsOnly specifies whether to only accept TLS-encrypted connections. By
+	// default, the value is false.
+	TlsOnly bool `json:"tls_only"`
 	// HeartBeatTimeout specifies the maximum time to wait for a heartbeat
 	// HeartBeatTimeout specifies the maximum time to wait for a heartbeat
 	// before terminating the connection. It is not recommended to change this
 	// before terminating the connection. It is not recommended to change this
 	// value. By default, this value is 90.
 	// value. By default, this value is 90.
@@ -143,34 +151,37 @@ type ServerCommonConf struct {
 // defaults.
 // defaults.
 func GetDefaultServerConf() ServerCommonConf {
 func GetDefaultServerConf() ServerCommonConf {
 	return ServerCommonConf{
 	return ServerCommonConf{
-		BindAddr:          "0.0.0.0",
-		BindPort:          7000,
-		BindUdpPort:       0,
-		KcpBindPort:       0,
-		ProxyBindAddr:     "0.0.0.0",
-		VhostHttpPort:     0,
-		VhostHttpsPort:    0,
-		VhostHttpTimeout:  60,
-		DashboardAddr:     "0.0.0.0",
-		DashboardPort:     0,
-		DashboardUser:     "admin",
-		DashboardPwd:      "admin",
-		AssetsDir:         "",
-		LogFile:           "console",
-		LogWay:            "console",
-		LogLevel:          "info",
-		LogMaxDays:        3,
-		DisableLogColor:   false,
-		Token:             "",
-		SubDomainHost:     "",
-		TcpMux:            true,
-		AllowPorts:        make(map[int]struct{}),
-		MaxPoolCount:      5,
-		MaxPortsPerClient: 0,
-		HeartBeatTimeout:  90,
-		UserConnTimeout:   10,
-		Custom404Page:     "",
-		HTTPPlugins:       make(map[string]plugin.HTTPPluginOptions),
+		BindAddr:               "0.0.0.0",
+		BindPort:               7000,
+		BindUdpPort:            0,
+		KcpBindPort:            0,
+		ProxyBindAddr:          "0.0.0.0",
+		VhostHttpPort:          0,
+		VhostHttpsPort:         0,
+		TcpMuxHttpConnectPort:  0,
+		VhostHttpTimeout:       60,
+		DashboardAddr:          "0.0.0.0",
+		DashboardPort:          0,
+		DashboardUser:          "admin",
+		DashboardPwd:           "admin",
+		EnablePrometheus:       false,
+		AssetsDir:              "",
+		LogFile:                "console",
+		LogWay:                 "console",
+		LogLevel:               "info",
+		LogMaxDays:             3,
+		DisableLogColor:        false,
+		DetailedErrorsToClient: true,
+		SubDomainHost:          "",
+		TcpMux:                 true,
+		AllowPorts:             make(map[int]struct{}),
+		MaxPoolCount:           5,
+		MaxPortsPerClient:      0,
+		TlsOnly:                false,
+		HeartBeatTimeout:       90,
+		UserConnTimeout:        10,
+		Custom404Page:          "",
+		HTTPPlugins:            make(map[string]plugin.HTTPPluginOptions),
 	}
 	}
 }
 }
 
 
@@ -187,6 +198,8 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
 
 
 	UnmarshalPluginsFromIni(conf, &cfg)
 	UnmarshalPluginsFromIni(conf, &cfg)
 
 
+	cfg.AuthServerConfig = auth.UnmarshalAuthServerConfFromIni(conf)
+
 	var (
 	var (
 		tmpStr string
 		tmpStr string
 		ok     bool
 		ok     bool
@@ -251,6 +264,17 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
 		cfg.VhostHttpsPort = 0
 		cfg.VhostHttpsPort = 0
 	}
 	}
 
 
+	if tmpStr, ok = conf.Get("common", "tcpmux_httpconnect_port"); ok {
+		if v, err = strconv.ParseInt(tmpStr, 10, 64); err != nil {
+			err = fmt.Errorf("Parse conf error: invalid tcpmux_httpconnect_port")
+			return
+		} else {
+			cfg.TcpMuxHttpConnectPort = int(v)
+		}
+	} else {
+		cfg.TcpMuxHttpConnectPort = 0
+	}
+
 	if tmpStr, ok = conf.Get("common", "vhost_http_timeout"); ok {
 	if tmpStr, ok = conf.Get("common", "vhost_http_timeout"); ok {
 		v, errRet := strconv.ParseInt(tmpStr, 10, 64)
 		v, errRet := strconv.ParseInt(tmpStr, 10, 64)
 		if errRet != nil || v < 0 {
 		if errRet != nil || v < 0 {
@@ -286,6 +310,10 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
 		cfg.DashboardPwd = tmpStr
 		cfg.DashboardPwd = tmpStr
 	}
 	}
 
 
+	if tmpStr, ok = conf.Get("common", "enable_prometheus"); ok && tmpStr == "true" {
+		cfg.EnablePrometheus = true
+	}
+
 	if tmpStr, ok = conf.Get("common", "assets_dir"); ok {
 	if tmpStr, ok = conf.Get("common", "assets_dir"); ok {
 		cfg.AssetsDir = tmpStr
 		cfg.AssetsDir = tmpStr
 	}
 	}
@@ -314,7 +342,11 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
 		cfg.DisableLogColor = true
 		cfg.DisableLogColor = true
 	}
 	}
 
 
-	cfg.Token, _ = conf.Get("common", "token")
+	if tmpStr, ok = conf.Get("common", "detailed_errors_to_client"); ok && tmpStr == "false" {
+		cfg.DetailedErrorsToClient = false
+	} else {
+		cfg.DetailedErrorsToClient = true
+	}
 
 
 	if allowPortsStr, ok := conf.Get("common", "allow_ports"); ok {
 	if allowPortsStr, ok := conf.Get("common", "allow_ports"); ok {
 		// e.g. 1000-2000,2001,2002,3000-4000
 		// e.g. 1000-2000,2001,2002,3000-4000
@@ -378,6 +410,12 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
 			cfg.HeartBeatTimeout = v
 			cfg.HeartBeatTimeout = v
 		}
 		}
 	}
 	}
+
+	if tmpStr, ok = conf.Get("common", "tls_only"); ok && tmpStr == "true" {
+		cfg.TlsOnly = true
+	} else {
+		cfg.TlsOnly = false
+	}
 	return
 	return
 }
 }
 
 

+ 14 - 6
models/consts/consts.go

@@ -23,10 +23,18 @@ var (
 	Offline string = "offline"
 	Offline string = "offline"
 
 
 	// proxy type
 	// proxy type
-	TcpProxy   string = "tcp"
-	UdpProxy   string = "udp"
-	HttpProxy  string = "http"
-	HttpsProxy string = "https"
-	StcpProxy  string = "stcp"
-	XtcpProxy  string = "xtcp"
+	TcpProxy    string = "tcp"
+	UdpProxy    string = "udp"
+	TcpMuxProxy string = "tcpmux"
+	HttpProxy   string = "http"
+	HttpsProxy  string = "https"
+	StcpProxy   string = "stcp"
+	XtcpProxy   string = "xtcp"
+
+	// authentication method
+	TokenAuthMethod string = "token"
+	OidcAuthMethod  string = "oidc"
+
+	// tcp multiplexer
+	HttpConnectTcpMultiplexer string = "httpconnect"
 )
 )

+ 93 - 0
models/metrics/aggregate/server.go

@@ -0,0 +1,93 @@
+// Copyright 2020 fatedier, fatedier@gmail.com
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate
+
+import (
+	"github.com/fatedier/frp/models/metrics/mem"
+	"github.com/fatedier/frp/models/metrics/prometheus"
+	"github.com/fatedier/frp/server/metrics"
+)
+
+// EnableMem start to mark metrics to memory monitor system.
+func EnableMem() {
+	sm.Add(mem.ServerMetrics)
+}
+
+// EnablePrometheus start to mark metrics to prometheus.
+func EnablePrometheus() {
+	sm.Add(prometheus.ServerMetrics)
+}
+
+var sm *serverMetrics = &serverMetrics{}
+
+func init() {
+	metrics.Register(sm)
+}
+
+type serverMetrics struct {
+	ms []metrics.ServerMetrics
+}
+
+func (m *serverMetrics) Add(sm metrics.ServerMetrics) {
+	m.ms = append(m.ms, sm)
+}
+
+func (m *serverMetrics) NewClient() {
+	for _, v := range m.ms {
+		v.NewClient()
+	}
+}
+
+func (m *serverMetrics) CloseClient() {
+	for _, v := range m.ms {
+		v.CloseClient()
+	}
+}
+
+func (m *serverMetrics) NewProxy(name string, proxyType string) {
+	for _, v := range m.ms {
+		v.NewProxy(name, proxyType)
+	}
+}
+
+func (m *serverMetrics) CloseProxy(name string, proxyType string) {
+	for _, v := range m.ms {
+		v.CloseProxy(name, proxyType)
+	}
+}
+
+func (m *serverMetrics) OpenConnection(name string, proxyType string) {
+	for _, v := range m.ms {
+		v.OpenConnection(name, proxyType)
+	}
+}
+
+func (m *serverMetrics) CloseConnection(name string, proxyType string) {
+	for _, v := range m.ms {
+		v.CloseConnection(name, proxyType)
+	}
+}
+
+func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
+	for _, v := range m.ms {
+		v.AddTrafficIn(name, proxyType, trafficBytes)
+	}
+}
+
+func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
+	for _, v := range m.ms {
+		v.AddTrafficOut(name, proxyType, trafficBytes)
+	}
+}

+ 262 - 0
models/metrics/mem/server.go

@@ -0,0 +1,262 @@
+// Copyright 2019 fatedier, fatedier@gmail.com
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+	"sync"
+	"time"
+
+	server "github.com/fatedier/frp/server/metrics"
+	"github.com/fatedier/frp/utils/log"
+	"github.com/fatedier/frp/utils/metric"
+)
+
+var sm *serverMetrics = newServerMetrics()
+var ServerMetrics server.ServerMetrics
+var StatsCollector Collector
+
+func init() {
+	ServerMetrics = sm
+	StatsCollector = sm
+	sm.run()
+}
+
+type serverMetrics struct {
+	info *ServerStatistics
+	mu   sync.Mutex
+}
+
+func newServerMetrics() *serverMetrics {
+	return &serverMetrics{
+		info: &ServerStatistics{
+			TotalTrafficIn:  metric.NewDateCounter(ReserveDays),
+			TotalTrafficOut: metric.NewDateCounter(ReserveDays),
+			CurConns:        metric.NewCounter(),
+
+			ClientCounts:    metric.NewCounter(),
+			ProxyTypeCounts: make(map[string]metric.Counter),
+
+			ProxyStatistics: make(map[string]*ProxyStatistics),
+		},
+	}
+}
+
+func (m *serverMetrics) run() {
+	go func() {
+		for {
+			time.Sleep(12 * time.Hour)
+			log.Debug("start to clear useless proxy statistics data...")
+			m.clearUselessInfo()
+			log.Debug("finish to clear useless proxy statistics data")
+		}
+	}()
+}
+
+func (m *serverMetrics) clearUselessInfo() {
+	// To check if there are proxies that closed than 7 days and drop them.
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	for name, data := range m.info.ProxyStatistics {
+		if !data.LastCloseTime.IsZero() && time.Since(data.LastCloseTime) > time.Duration(7*24)*time.Hour {
+			delete(m.info.ProxyStatistics, name)
+			log.Trace("clear proxy [%s]'s statistics data, lastCloseTime: [%s]", name, data.LastCloseTime.String())
+		}
+	}
+}
+
+func (m *serverMetrics) NewClient() {
+	m.info.ClientCounts.Inc(1)
+}
+
+func (m *serverMetrics) CloseClient() {
+	m.info.ClientCounts.Dec(1)
+}
+
+func (m *serverMetrics) NewProxy(name string, proxyType string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	counter, ok := m.info.ProxyTypeCounts[proxyType]
+	if !ok {
+		counter = metric.NewCounter()
+	}
+	counter.Inc(1)
+	m.info.ProxyTypeCounts[proxyType] = counter
+
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if !(ok && proxyStats.ProxyType == proxyType) {
+		proxyStats = &ProxyStatistics{
+			Name:       name,
+			ProxyType:  proxyType,
+			CurConns:   metric.NewCounter(),
+			TrafficIn:  metric.NewDateCounter(ReserveDays),
+			TrafficOut: metric.NewDateCounter(ReserveDays),
+		}
+		m.info.ProxyStatistics[name] = proxyStats
+	}
+	proxyStats.LastStartTime = time.Now()
+}
+
+func (m *serverMetrics) CloseProxy(name string, proxyType string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if counter, ok := m.info.ProxyTypeCounts[proxyType]; ok {
+		counter.Dec(1)
+	}
+	if proxyStats, ok := m.info.ProxyStatistics[name]; ok {
+		proxyStats.LastCloseTime = time.Now()
+	}
+}
+
+func (m *serverMetrics) OpenConnection(name string, proxyType string) {
+	m.info.CurConns.Inc(1)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if ok {
+		proxyStats.CurConns.Inc(1)
+		m.info.ProxyStatistics[name] = proxyStats
+	}
+}
+
+func (m *serverMetrics) CloseConnection(name string, proxyType string) {
+	m.info.CurConns.Dec(1)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if ok {
+		proxyStats.CurConns.Dec(1)
+		m.info.ProxyStatistics[name] = proxyStats
+	}
+}
+
+func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
+	m.info.TotalTrafficIn.Inc(trafficBytes)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if ok {
+		proxyStats.TrafficIn.Inc(trafficBytes)
+		m.info.ProxyStatistics[name] = proxyStats
+	}
+}
+
+func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
+	m.info.TotalTrafficOut.Inc(trafficBytes)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if ok {
+		proxyStats.TrafficOut.Inc(trafficBytes)
+		m.info.ProxyStatistics[name] = proxyStats
+	}
+}
+
+// Get stats data api.
+
+func (m *serverMetrics) GetServer() *ServerStats {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	s := &ServerStats{
+		TotalTrafficIn:  m.info.TotalTrafficIn.TodayCount(),
+		TotalTrafficOut: m.info.TotalTrafficOut.TodayCount(),
+		CurConns:        m.info.CurConns.Count(),
+		ClientCounts:    m.info.ClientCounts.Count(),
+		ProxyTypeCounts: make(map[string]int64),
+	}
+	for k, v := range m.info.ProxyTypeCounts {
+		s.ProxyTypeCounts[k] = v.Count()
+	}
+	return s
+}
+
+func (m *serverMetrics) GetProxiesByType(proxyType string) []*ProxyStats {
+	res := make([]*ProxyStats, 0)
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	for name, proxyStats := range m.info.ProxyStatistics {
+		if proxyStats.ProxyType != proxyType {
+			continue
+		}
+
+		ps := &ProxyStats{
+			Name:            name,
+			Type:            proxyStats.ProxyType,
+			TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
+			TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
+			CurConns:        proxyStats.CurConns.Count(),
+		}
+		if !proxyStats.LastStartTime.IsZero() {
+			ps.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
+		}
+		if !proxyStats.LastCloseTime.IsZero() {
+			ps.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
+		}
+		res = append(res, ps)
+	}
+	return res
+}
+
+func (m *serverMetrics) GetProxiesByTypeAndName(proxyType string, proxyName string) (res *ProxyStats) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	for name, proxyStats := range m.info.ProxyStatistics {
+		if proxyStats.ProxyType != proxyType {
+			continue
+		}
+
+		if name != proxyName {
+			continue
+		}
+
+		res = &ProxyStats{
+			Name:            name,
+			Type:            proxyStats.ProxyType,
+			TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
+			TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
+			CurConns:        proxyStats.CurConns.Count(),
+		}
+		if !proxyStats.LastStartTime.IsZero() {
+			res.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
+		}
+		if !proxyStats.LastCloseTime.IsZero() {
+			res.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
+		}
+		break
+	}
+	return
+}
+
+func (m *serverMetrics) GetProxyTraffic(name string) (res *ProxyTrafficInfo) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	proxyStats, ok := m.info.ProxyStatistics[name]
+	if ok {
+		res = &ProxyTrafficInfo{
+			Name: name,
+		}
+		res.TrafficIn = proxyStats.TrafficIn.GetLastDaysCount(ReserveDays)
+		res.TrafficOut = proxyStats.TrafficOut.GetLastDaysCount(ReserveDays)
+	}
+	return
+}

+ 1 - 48
server/stats/stats.go → models/metrics/mem/types.go

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // See the License for the specific language governing permissions and
 // limitations under the License.
 // limitations under the License.
 
 
-package stats
+package mem
 
 
 import (
 import (
 	"time"
 	"time"
@@ -24,19 +24,6 @@ const (
 	ReserveDays = 7
 	ReserveDays = 7
 )
 )
 
 
-type StatsType int
-
-const (
-	TypeNewClient StatsType = iota
-	TypeCloseClient
-	TypeNewProxy
-	TypeCloseProxy
-	TypeOpenConnection
-	TypeCloseConnection
-	TypeAddTrafficIn
-	TypeAddTrafficOut
-)
-
 type ServerStats struct {
 type ServerStats struct {
 	TotalTrafficIn  int64
 	TotalTrafficIn  int64
 	TotalTrafficOut int64
 	TotalTrafficOut int64
@@ -88,42 +75,8 @@ type ServerStatistics struct {
 }
 }
 
 
 type Collector interface {
 type Collector interface {
-	Mark(statsType StatsType, payload interface{})
-	Run() error
 	GetServer() *ServerStats
 	GetServer() *ServerStats
 	GetProxiesByType(proxyType string) []*ProxyStats
 	GetProxiesByType(proxyType string) []*ProxyStats
 	GetProxiesByTypeAndName(proxyType string, proxyName string) *ProxyStats
 	GetProxiesByTypeAndName(proxyType string, proxyName string) *ProxyStats
 	GetProxyTraffic(name string) *ProxyTrafficInfo
 	GetProxyTraffic(name string) *ProxyTrafficInfo
 }
 }
-
-type NewClientPayload struct{}
-
-type CloseClientPayload struct{}
-
-type NewProxyPayload struct {
-	Name      string
-	ProxyType string
-}
-
-type CloseProxyPayload struct {
-	Name      string
-	ProxyType string
-}
-
-type OpenConnectionPayload struct {
-	ProxyName string
-}
-
-type CloseConnectionPayload struct {
-	ProxyName string
-}
-
-type AddTrafficInPayload struct {
-	ProxyName    string
-	TrafficBytes int64
-}
-
-type AddTrafficOutPayload struct {
-	ProxyName    string
-	TrafficBytes int64
-}

+ 8 - 0
models/metrics/metrics.go

@@ -0,0 +1,8 @@
+package metrics
+
+import (
+	"github.com/fatedier/frp/models/metrics/aggregate"
+)
+
+var EnableMem = aggregate.EnableMem
+var EnablePrometheus = aggregate.EnablePrometheus

+ 95 - 0
models/metrics/prometheus/server.go

@@ -0,0 +1,95 @@
+package prometheus
+
+import (
+	"github.com/fatedier/frp/server/metrics"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	namespace       = "frp"
+	serverSubsystem = "server"
+)
+
+var ServerMetrics metrics.ServerMetrics = newServerMetrics()
+
+type serverMetrics struct {
+	clientCount     prometheus.Gauge
+	proxyCount      *prometheus.GaugeVec
+	connectionCount *prometheus.GaugeVec
+	trafficIn       *prometheus.CounterVec
+	trafficOut      *prometheus.CounterVec
+}
+
+func (m *serverMetrics) NewClient() {
+	m.clientCount.Inc()
+}
+
+func (m *serverMetrics) CloseClient() {
+	m.clientCount.Dec()
+}
+
+func (m *serverMetrics) NewProxy(name string, proxyType string) {
+	m.proxyCount.WithLabelValues(proxyType).Inc()
+}
+
+func (m *serverMetrics) CloseProxy(name string, proxyType string) {
+	m.proxyCount.WithLabelValues(proxyType).Dec()
+}
+
+func (m *serverMetrics) OpenConnection(name string, proxyType string) {
+	m.connectionCount.WithLabelValues(name, proxyType).Inc()
+}
+
+func (m *serverMetrics) CloseConnection(name string, proxyType string) {
+	m.connectionCount.WithLabelValues(name, proxyType).Dec()
+}
+
+func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
+	m.trafficIn.WithLabelValues(name, proxyType).Add(float64(trafficBytes))
+}
+
+func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
+	m.trafficOut.WithLabelValues(name, proxyType).Add(float64(trafficBytes))
+}
+
+func newServerMetrics() *serverMetrics {
+	m := &serverMetrics{
+		clientCount: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: serverSubsystem,
+			Name:      "client_counts",
+			Help:      "The current client counts of frps",
+		}),
+		proxyCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: serverSubsystem,
+			Name:      "proxy_counts",
+			Help:      "The current proxy counts",
+		}, []string{"type"}),
+		connectionCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: serverSubsystem,
+			Name:      "connection_counts",
+			Help:      "The current connection counts",
+		}, []string{"name", "type"}),
+		trafficIn: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: serverSubsystem,
+			Name:      "traffic_in",
+			Help:      "The total in traffic",
+		}, []string{"name", "type"}),
+		trafficOut: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: serverSubsystem,
+			Name:      "traffic_out",
+			Help:      "The total out traffic",
+		}, []string{"name", "type"}),
+	}
+	prometheus.MustRegister(m.clientCount)
+	prometheus.MustRegister(m.proxyCount)
+	prometheus.MustRegister(m.connectionCount)
+	prometheus.MustRegister(m.trafficIn)
+	prometheus.MustRegister(m.trafficOut)
+	return m
+}

+ 10 - 1
models/msg/msg.go

@@ -107,6 +107,9 @@ type NewProxy struct {
 
 
 	// stcp
 	// stcp
 	Sk string `json:"sk"`
 	Sk string `json:"sk"`
+
+	// tcpmux
+	Multiplexer string `json:"multiplexer"`
 }
 }
 
 
 type NewProxyResp struct {
 type NewProxyResp struct {
@@ -120,7 +123,9 @@ type CloseProxy struct {
 }
 }
 
 
 type NewWorkConn struct {
 type NewWorkConn struct {
-	RunId string `json:"run_id"`
+	RunId        string `json:"run_id"`
+	PrivilegeKey string `json:"privilege_key"`
+	Timestamp    int64  `json:"timestamp"`
 }
 }
 
 
 type ReqWorkConn struct {
 type ReqWorkConn struct {
@@ -132,6 +137,7 @@ type StartWorkConn struct {
 	DstAddr   string `json:"dst_addr"`
 	DstAddr   string `json:"dst_addr"`
 	SrcPort   uint16 `json:"src_port"`
 	SrcPort   uint16 `json:"src_port"`
 	DstPort   uint16 `json:"dst_port"`
 	DstPort   uint16 `json:"dst_port"`
+	Error     string `json:"error"`
 }
 }
 
 
 type NewVisitorConn struct {
 type NewVisitorConn struct {
@@ -148,9 +154,12 @@ type NewVisitorConnResp struct {
 }
 }
 
 
 type Ping struct {
 type Ping struct {
+	PrivilegeKey string `json:"privilege_key"`
+	Timestamp    int64  `json:"timestamp"`
 }
 }
 
 
 type Pong struct {
 type Pong struct {
+	Error string `json:"error"`
 }
 }
 
 
 type UdpPacket struct {
 type UdpPacket struct {

+ 1 - 0
models/plugin/server/http.go

@@ -84,6 +84,7 @@ func (p *httpPlugin) do(ctx context.Context, r *Request, res *Response) error {
 	}
 	}
 	req = req.WithContext(ctx)
 	req = req.WithContext(ctx)
 	req.Header.Set("X-Frp-Reqid", GetReqidFromContext(ctx))
 	req.Header.Set("X-Frp-Reqid", GetReqidFromContext(ctx))
+	req.Header.Set("Content-Type", "application/json")
 	resp, err := p.client.Do(req)
 	resp, err := p.client.Do(req)
 	if err != nil {
 	if err != nil {
 		return err
 		return err

+ 23 - 23
server/control.go

@@ -23,14 +23,16 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/consts"
 	"github.com/fatedier/frp/models/consts"
 	frpErr "github.com/fatedier/frp/models/errors"
 	frpErr "github.com/fatedier/frp/models/errors"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/msg"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	"github.com/fatedier/frp/server/controller"
 	"github.com/fatedier/frp/server/controller"
+	"github.com/fatedier/frp/server/metrics"
 	"github.com/fatedier/frp/server/proxy"
 	"github.com/fatedier/frp/server/proxy"
-	"github.com/fatedier/frp/server/stats"
+	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/xlog"
 	"github.com/fatedier/frp/utils/xlog"
 
 
@@ -90,8 +92,8 @@ type Control struct {
 	// plugin manager
 	// plugin manager
 	pluginManager *plugin.Manager
 	pluginManager *plugin.Manager
 
 
-	// stats collector to store stats info of clients and proxies
-	statsCollector stats.Collector
+	// verifies authentication based on selected method
+	authVerifier auth.Verifier
 
 
 	// login message
 	// login message
 	loginMsg *msg.Login
 	loginMsg *msg.Login
@@ -147,7 +149,7 @@ func NewControl(
 	rc *controller.ResourceController,
 	rc *controller.ResourceController,
 	pxyManager *proxy.ProxyManager,
 	pxyManager *proxy.ProxyManager,
 	pluginManager *plugin.Manager,
 	pluginManager *plugin.Manager,
-	statsCollector stats.Collector,
+	authVerifier auth.Verifier,
 	ctlConn net.Conn,
 	ctlConn net.Conn,
 	loginMsg *msg.Login,
 	loginMsg *msg.Login,
 	serverCfg config.ServerCommonConf,
 	serverCfg config.ServerCommonConf,
@@ -161,7 +163,7 @@ func NewControl(
 		rc:              rc,
 		rc:              rc,
 		pxyManager:      pxyManager,
 		pxyManager:      pxyManager,
 		pluginManager:   pluginManager,
 		pluginManager:   pluginManager,
-		statsCollector:  statsCollector,
+		authVerifier:    authVerifier,
 		conn:            ctlConn,
 		conn:            ctlConn,
 		loginMsg:        loginMsg,
 		loginMsg:        loginMsg,
 		sendCh:          make(chan msg.Message, 10),
 		sendCh:          make(chan msg.Message, 10),
@@ -203,7 +205,7 @@ func (ctl *Control) Start() {
 	go ctl.stoper()
 	go ctl.stoper()
 }
 }
 
 
-func (ctl *Control) RegisterWorkConn(conn net.Conn) {
+func (ctl *Control) RegisterWorkConn(conn net.Conn) error {
 	xl := ctl.xl
 	xl := ctl.xl
 	defer func() {
 	defer func() {
 		if err := recover(); err != nil {
 		if err := recover(); err != nil {
@@ -215,9 +217,10 @@ func (ctl *Control) RegisterWorkConn(conn net.Conn) {
 	select {
 	select {
 	case ctl.workConnCh <- conn:
 	case ctl.workConnCh <- conn:
 		xl.Debug("new work connection registered")
 		xl.Debug("new work connection registered")
+		return nil
 	default:
 	default:
 		xl.Debug("work connection pool is full, discarding")
 		xl.Debug("work connection pool is full, discarding")
-		conn.Close()
+		return fmt.Errorf("work connection pool is full, discarding")
 	}
 	}
 }
 }
 
 
@@ -373,16 +376,12 @@ func (ctl *Control) stoper() {
 	for _, pxy := range ctl.proxies {
 	for _, pxy := range ctl.proxies {
 		pxy.Close()
 		pxy.Close()
 		ctl.pxyManager.Del(pxy.GetName())
 		ctl.pxyManager.Del(pxy.GetName())
-		ctl.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseProxyPayload{
-			Name:      pxy.GetName(),
-			ProxyType: pxy.GetConf().GetBaseInfo().ProxyType,
-		})
+		metrics.Server.CloseProxy(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
 	}
 	}
 
 
 	ctl.allShutdown.Done()
 	ctl.allShutdown.Done()
 	xl.Info("client exit success")
 	xl.Info("client exit success")
-
-	ctl.statsCollector.Mark(stats.TypeCloseClient, &stats.CloseClientPayload{})
+	metrics.Server.CloseClient()
 }
 }
 
 
 // block until Control closed
 // block until Control closed
@@ -438,21 +437,25 @@ func (ctl *Control) manager() {
 					ProxyName: m.ProxyName,
 					ProxyName: m.ProxyName,
 				}
 				}
 				if err != nil {
 				if err != nil {
-					resp.Error = err.Error()
 					xl.Warn("new proxy [%s] error: %v", m.ProxyName, err)
 					xl.Warn("new proxy [%s] error: %v", m.ProxyName, err)
+					resp.Error = util.GenerateResponseErrorString(fmt.Sprintf("new proxy [%s] error", m.ProxyName), err, ctl.serverCfg.DetailedErrorsToClient)
 				} else {
 				} else {
 					resp.RemoteAddr = remoteAddr
 					resp.RemoteAddr = remoteAddr
 					xl.Info("new proxy [%s] success", m.ProxyName)
 					xl.Info("new proxy [%s] success", m.ProxyName)
-					ctl.statsCollector.Mark(stats.TypeNewProxy, &stats.NewProxyPayload{
-						Name:      m.ProxyName,
-						ProxyType: m.ProxyType,
-					})
+					metrics.Server.NewProxy(m.ProxyName, m.ProxyType)
 				}
 				}
 				ctl.sendCh <- resp
 				ctl.sendCh <- resp
 			case *msg.CloseProxy:
 			case *msg.CloseProxy:
 				ctl.CloseProxy(m)
 				ctl.CloseProxy(m)
 				xl.Info("close proxy [%s] success", m.ProxyName)
 				xl.Info("close proxy [%s] success", m.ProxyName)
 			case *msg.Ping:
 			case *msg.Ping:
+				if err := ctl.authVerifier.VerifyPing(m); err != nil {
+					xl.Warn("received invalid ping: %v", err)
+					ctl.sendCh <- &msg.Pong{
+						Error: "invalid authentication in ping",
+					}
+					return
+				}
 				ctl.lastPing = time.Now()
 				ctl.lastPing = time.Now()
 				xl.Debug("receive heartbeat")
 				xl.Debug("receive heartbeat")
 				ctl.sendCh <- &msg.Pong{}
 				ctl.sendCh <- &msg.Pong{}
@@ -471,7 +474,7 @@ func (ctl *Control) RegisterProxy(pxyMsg *msg.NewProxy) (remoteAddr string, err
 
 
 	// NewProxy will return a interface Proxy.
 	// NewProxy will return a interface Proxy.
 	// In fact it create different proxies by different proxy type, we just call run() here.
 	// In fact it create different proxies by different proxy type, we just call run() here.
-	pxy, err := proxy.NewProxy(ctl.ctx, ctl.runId, ctl.rc, ctl.statsCollector, ctl.poolCount, ctl.GetWorkConn, pxyConf, ctl.serverCfg)
+	pxy, err := proxy.NewProxy(ctl.ctx, ctl.runId, ctl.rc, ctl.poolCount, ctl.GetWorkConn, pxyConf, ctl.serverCfg)
 	if err != nil {
 	if err != nil {
 		return remoteAddr, err
 		return remoteAddr, err
 	}
 	}
@@ -533,9 +536,6 @@ func (ctl *Control) CloseProxy(closeMsg *msg.CloseProxy) (err error) {
 	delete(ctl.proxies, closeMsg.ProxyName)
 	delete(ctl.proxies, closeMsg.ProxyName)
 	ctl.mu.Unlock()
 	ctl.mu.Unlock()
 
 
-	ctl.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseProxyPayload{
-		Name:      pxy.GetName(),
-		ProxyType: pxy.GetConf().GetBaseInfo().ProxyType,
-	})
+	metrics.Server.CloseProxy(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
 	return
 	return
 }
 }

+ 4 - 0
server/controller/resource.go

@@ -18,6 +18,7 @@ import (
 	"github.com/fatedier/frp/models/nathole"
 	"github.com/fatedier/frp/models/nathole"
 	"github.com/fatedier/frp/server/group"
 	"github.com/fatedier/frp/server/group"
 	"github.com/fatedier/frp/server/ports"
 	"github.com/fatedier/frp/server/ports"
+	"github.com/fatedier/frp/utils/tcpmux"
 	"github.com/fatedier/frp/utils/vhost"
 	"github.com/fatedier/frp/utils/vhost"
 )
 )
 
 
@@ -46,4 +47,7 @@ type ResourceController struct {
 
 
 	// Controller for nat hole connections
 	// Controller for nat hole connections
 	NatHoleController *nathole.NatHoleController
 	NatHoleController *nathole.NatHoleController
+
+	// TcpMux HTTP CONNECT multiplexer
+	TcpMuxHttpConnectMuxer *tcpmux.HttpConnectTcpMuxer
 }
 }

+ 6 - 0
server/dashboard.go

@@ -24,6 +24,7 @@ import (
 	frpNet "github.com/fatedier/frp/utils/net"
 	frpNet "github.com/fatedier/frp/utils/net"
 
 
 	"github.com/gorilla/mux"
 	"github.com/gorilla/mux"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 )
 
 
 var (
 var (
@@ -38,6 +39,11 @@ func (svr *Service) RunDashboardServer(addr string, port int) (err error) {
 	user, passwd := svr.cfg.DashboardUser, svr.cfg.DashboardPwd
 	user, passwd := svr.cfg.DashboardUser, svr.cfg.DashboardPwd
 	router.Use(frpNet.NewHttpAuthMiddleware(user, passwd).Middleware)
 	router.Use(frpNet.NewHttpAuthMiddleware(user, passwd).Middleware)
 
 
+	// metrics
+	if svr.cfg.EnablePrometheus {
+		router.Handle("/metrics", promhttp.Handler())
+	}
+
 	// api, see dashboard_api.go
 	// api, see dashboard_api.go
 	router.HandleFunc("/api/serverinfo", svr.ApiServerInfo).Methods("GET")
 	router.HandleFunc("/api/serverinfo", svr.ApiServerInfo).Methods("GET")
 	router.HandleFunc("/api/proxy/{type}", svr.ApiProxyByType).Methods("GET")
 	router.HandleFunc("/api/proxy/{type}", svr.ApiProxyByType).Methods("GET")

+ 13 - 4
server/dashboard_api.go

@@ -20,6 +20,7 @@ import (

 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/consts"
+	"github.com/fatedier/frp/models/metrics/mem"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/version"

@@ -62,7 +63,7 @@ func (svr *Service) ApiServerInfo(w http.ResponseWriter, r *http.Request) {
 	}()

 	log.Info("Http request: [%s]", r.URL.Path)
-	serverStats := svr.statsCollector.GetServer()
+	serverStats := mem.StatsCollector.GetServer()
 	svrResp := ServerInfoResp{
 		Version:           version.Full(),
 		BindPort:          svr.cfg.BindPort,
@@ -95,6 +96,12 @@ type TcpOutConf struct {
 	RemotePort int `json:"remote_port"`
 }

+type TcpMuxOutConf struct {
+	BaseOutConf
+	config.DomainConf
+	Multiplexer string `json:"multiplexer"`
+}
+
 type UdpOutConf struct {
 	BaseOutConf
 	RemotePort int `json:"remote_port"`
@@ -124,6 +131,8 @@ func getConfByType(proxyType string) interface{} {
 	switch proxyType {
 	case consts.TcpProxy:
 		return &TcpOutConf{}
+	case consts.TcpMuxProxy:
+		return &TcpMuxOutConf{}
 	case consts.UdpProxy:
 		return &UdpOutConf{}
 	case consts.HttpProxy:
@@ -178,7 +187,7 @@ func (svr *Service) ApiProxyByType(w http.ResponseWriter, r *http.Request) {
 }

 func (svr *Service) getProxyStatsByType(proxyType string) (proxyInfos []*ProxyStatsInfo) {
-	proxyStats := svr.statsCollector.GetProxiesByType(proxyType)
+	proxyStats := mem.StatsCollector.GetProxiesByType(proxyType)
 	proxyInfos = make([]*ProxyStatsInfo, 0, len(proxyStats))
 	for _, ps := range proxyStats {
 		proxyInfo := &ProxyStatsInfo{}
@@ -248,7 +257,7 @@ func (svr *Service) ApiProxyByTypeAndName(w http.ResponseWriter, r *http.Request

 func (svr *Service) getProxyStatsByTypeAndName(proxyType string, proxyName string) (proxyInfo GetProxyStatsResp, code int, msg string) {
 	proxyInfo.Name = proxyName
-	ps := svr.statsCollector.GetProxiesByTypeAndName(proxyType, proxyName)
+	ps := mem.StatsCollector.GetProxiesByTypeAndName(proxyType, proxyName)
 	if ps == nil {
 		code = 404
 		msg = "no proxy info found"
@@ -306,7 +315,7 @@ func (svr *Service) ApiProxyTraffic(w http.ResponseWriter, r *http.Request) {

 	trafficResp := GetProxyTrafficResp{}
 	trafficResp.Name = name
-	proxyTrafficInfo := svr.statsCollector.GetProxyTraffic(name)
+	proxyTrafficInfo := mem.StatsCollector.GetProxyTraffic(name)

 	if proxyTrafficInfo == nil {
 		res.Code = 404

+ 37 - 0
server/metrics/metrics.go

@@ -0,0 +1,37 @@
+package metrics
+
+import (
+	"sync"
+)
+
+type ServerMetrics interface {
+	NewClient()
+	CloseClient()
+	NewProxy(name string, proxyType string)
+	CloseProxy(name string, proxyType string)
+	OpenConnection(name string, proxyType string)
+	CloseConnection(name string, proxyType string)
+	AddTrafficIn(name string, proxyType string, trafficBytes int64)
+	AddTrafficOut(name string, proxyType string, trafficBytes int64)
+}
+
+var Server ServerMetrics = noopServerMetrics{}
+
+var registerMetrics sync.Once
+
+func Register(m ServerMetrics) {
+	registerMetrics.Do(func() {
+		Server = m
+	})
+}
+
+type noopServerMetrics struct{}
+
+func (noopServerMetrics) NewClient()                                                      {}
+func (noopServerMetrics) CloseClient()                                                    {}
+func (noopServerMetrics) NewProxy(name string, proxyType string)                          {}
+func (noopServerMetrics) CloseProxy(name string, proxyType string)                        {}
+func (noopServerMetrics) OpenConnection(name string, proxyType string)                    {}
+func (noopServerMetrics) CloseConnection(name string, proxyType string)                   {}
+func (noopServerMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64)  {}
+func (noopServerMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {}
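
Register is the single hook the rest of the server talks to: metrics.Server starts out as the no-op implementation above, and the first (and only the first) Register call swaps in a real exporter, which is how EnableMem/EnablePrometheus in models/metrics plug in. A minimal sketch of an exporter using this hook; the stdoutMetrics type is invented purely for illustration:

package main

import (
	"fmt"

	"github.com/fatedier/frp/server/metrics"
)

// stdoutMetrics implements metrics.ServerMetrics by printing each event.
type stdoutMetrics struct{}

func (stdoutMetrics) NewClient()                                   { fmt.Println("client +1") }
func (stdoutMetrics) CloseClient()                                 { fmt.Println("client -1") }
func (stdoutMetrics) NewProxy(name, proxyType string)              { fmt.Println("new proxy", name, proxyType) }
func (stdoutMetrics) CloseProxy(name, proxyType string)            { fmt.Println("close proxy", name, proxyType) }
func (stdoutMetrics) OpenConnection(name, proxyType string)        {}
func (stdoutMetrics) CloseConnection(name, proxyType string)       {}
func (stdoutMetrics) AddTrafficIn(name, proxyType string, n int64) {}
func (stdoutMetrics) AddTrafficOut(name, proxyType string, n int64) {}

func main() {
	// Register only takes effect once (sync.Once); later calls are ignored,
	// and code that never registers anything keeps the cheap no-op default.
	metrics.Register(stdoutMetrics{})
	metrics.Server.NewProxy("ssh", "tcp")
}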

+ 6 - 11
server/proxy/http.go

@@ -20,7 +20,7 @@ import (
 	"strings"
 	"strings"
 
 
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
-	"github.com/fatedier/frp/server/stats"
+	"github.com/fatedier/frp/server/metrics"
 	frpNet "github.com/fatedier/frp/utils/net"
 	frpNet "github.com/fatedier/frp/utils/net"
 	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/vhost"
 	"github.com/fatedier/frp/utils/vhost"
@@ -159,21 +159,16 @@ func (pxy *HttpProxy) GetRealConn(remoteAddr string) (workConn net.Conn, err err
 	}
 	}
 	workConn = frpNet.WrapReadWriteCloserToConn(rwc, tmpConn)
 	workConn = frpNet.WrapReadWriteCloserToConn(rwc, tmpConn)
 	workConn = frpNet.WrapStatsConn(workConn, pxy.updateStatsAfterClosedConn)
 	workConn = frpNet.WrapStatsConn(workConn, pxy.updateStatsAfterClosedConn)
-	pxy.statsCollector.Mark(stats.TypeOpenConnection, &stats.OpenConnectionPayload{ProxyName: pxy.GetName()})
+	metrics.Server.OpenConnection(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
 	return
 	return
 }
 }
 
 
 func (pxy *HttpProxy) updateStatsAfterClosedConn(totalRead, totalWrite int64) {
 func (pxy *HttpProxy) updateStatsAfterClosedConn(totalRead, totalWrite int64) {
 	name := pxy.GetName()
 	name := pxy.GetName()
-	pxy.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseConnectionPayload{ProxyName: name})
-	pxy.statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
-		ProxyName:    name,
-		TrafficBytes: totalWrite,
-	})
-	pxy.statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
-		ProxyName:    name,
-		TrafficBytes: totalRead,
-	})
+	proxyType := pxy.GetConf().GetBaseInfo().ProxyType
+	metrics.Server.CloseConnection(name, proxyType)
+	metrics.Server.AddTrafficIn(name, proxyType, totalWrite)
+	metrics.Server.AddTrafficOut(name, proxyType, totalRead)
 }
 }
 
 
 func (pxy *HttpProxy) Close() {
 func (pxy *HttpProxy) Close() {

+ 32 - 32
server/proxy/proxy.go

@@ -25,7 +25,7 @@ import (
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/server/controller"
 	"github.com/fatedier/frp/server/controller"
-	"github.com/fatedier/frp/server/stats"
+	"github.com/fatedier/frp/server/metrics"
 	frpNet "github.com/fatedier/frp/utils/net"
 	frpNet "github.com/fatedier/frp/utils/net"
 	"github.com/fatedier/frp/utils/xlog"
 	"github.com/fatedier/frp/utils/xlog"
 
 
@@ -45,14 +45,13 @@ type Proxy interface {
 }
 }
 
 
 type BaseProxy struct {
 type BaseProxy struct {
-	name           string
-	rc             *controller.ResourceController
-	statsCollector stats.Collector
-	listeners      []net.Listener
-	usedPortsNum   int
-	poolCount      int
-	getWorkConnFn  GetWorkConnFn
-	serverCfg      config.ServerCommonConf
+	name          string
+	rc            *controller.ResourceController
+	listeners     []net.Listener
+	usedPortsNum  int
+	poolCount     int
+	getWorkConnFn GetWorkConnFn
+	serverCfg     config.ServerCommonConf
 
 
 	mu  sync.RWMutex
 	mu  sync.RWMutex
 	xl  *xlog.Logger
 	xl  *xlog.Logger
@@ -116,6 +115,7 @@ func (pxy *BaseProxy) GetWorkConnFromPool(src, dst net.Addr) (workConn net.Conn,
 			SrcPort:   uint16(srcPort),
 			SrcPort:   uint16(srcPort),
 			DstAddr:   dstAddr,
 			DstAddr:   dstAddr,
 			DstPort:   uint16(dstPort),
 			DstPort:   uint16(dstPort),
+			Error:     "",
 		})
 		})
 		if err != nil {
 		if err != nil {
 			xl.Warn("failed to send message to work connection from pool: %v, times: %d", err, i)
 			xl.Warn("failed to send message to work connection from pool: %v, times: %d", err, i)
@@ -135,7 +135,7 @@ func (pxy *BaseProxy) GetWorkConnFromPool(src, dst net.Addr) (workConn net.Conn,
 // startListenHandler start a goroutine handler for each listener.
 // startListenHandler start a goroutine handler for each listener.
 // p: p will just be passed to handler(Proxy, frpNet.Conn).
 // p: p will just be passed to handler(Proxy, frpNet.Conn).
 // handler: each proxy type can set different handler function to deal with connections accepted from listeners.
 // handler: each proxy type can set different handler function to deal with connections accepted from listeners.
-func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn, stats.Collector, config.ServerCommonConf)) {
+func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn, config.ServerCommonConf)) {
 	xl := xlog.FromContextSafe(pxy.ctx)
 	xl := xlog.FromContextSafe(pxy.ctx)
 	for _, listener := range pxy.listeners {
 	for _, listener := range pxy.listeners {
 		go func(l net.Listener) {
 		go func(l net.Listener) {
@@ -148,26 +148,25 @@ func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn,
 					return
 					return
 				}
 				}
 				xl.Debug("get a user connection [%s]", c.RemoteAddr().String())
 				xl.Debug("get a user connection [%s]", c.RemoteAddr().String())
-				go handler(p, c, pxy.statsCollector, pxy.serverCfg)
+				go handler(p, c, pxy.serverCfg)
 			}
 			}
 		}(listener)
 		}(listener)
 	}
 	}
 }
 }
 
 
-func NewProxy(ctx context.Context, runId string, rc *controller.ResourceController, statsCollector stats.Collector, poolCount int,
+func NewProxy(ctx context.Context, runId string, rc *controller.ResourceController, poolCount int,
 	getWorkConnFn GetWorkConnFn, pxyConf config.ProxyConf, serverCfg config.ServerCommonConf) (pxy Proxy, err error) {
 	getWorkConnFn GetWorkConnFn, pxyConf config.ProxyConf, serverCfg config.ServerCommonConf) (pxy Proxy, err error) {
 
 
 	xl := xlog.FromContextSafe(ctx).Spawn().AppendPrefix(pxyConf.GetBaseInfo().ProxyName)
 	xl := xlog.FromContextSafe(ctx).Spawn().AppendPrefix(pxyConf.GetBaseInfo().ProxyName)
 	basePxy := BaseProxy{
 	basePxy := BaseProxy{
-		name:           pxyConf.GetBaseInfo().ProxyName,
-		rc:             rc,
-		statsCollector: statsCollector,
-		listeners:      make([]net.Listener, 0),
-		poolCount:      poolCount,
-		getWorkConnFn:  getWorkConnFn,
-		serverCfg:      serverCfg,
-		xl:             xl,
-		ctx:            xlog.NewContext(ctx, xl),
+		name:          pxyConf.GetBaseInfo().ProxyName,
+		rc:            rc,
+		listeners:     make([]net.Listener, 0),
+		poolCount:     poolCount,
+		getWorkConnFn: getWorkConnFn,
+		serverCfg:     serverCfg,
+		xl:            xl,
+		ctx:           xlog.NewContext(ctx, xl),
 	}
 	}
 	switch cfg := pxyConf.(type) {
 	switch cfg := pxyConf.(type) {
 	case *config.TcpProxyConf:
 	case *config.TcpProxyConf:
@@ -176,6 +175,11 @@ func NewProxy(ctx context.Context, runId string, rc *controller.ResourceControll
 			BaseProxy: &basePxy,
 			BaseProxy: &basePxy,
 			cfg:       cfg,
 			cfg:       cfg,
 		}
 		}
+	case *config.TcpMuxProxyConf:
+		pxy = &TcpMuxProxy{
+			BaseProxy: &basePxy,
+			cfg:       cfg,
+		}
 	case *config.HttpProxyConf:
 	case *config.HttpProxyConf:
 		pxy = &HttpProxy{
 		pxy = &HttpProxy{
 			BaseProxy: &basePxy,
 			BaseProxy: &basePxy,
@@ -210,7 +214,7 @@ func NewProxy(ctx context.Context, runId string, rc *controller.ResourceControll
 
 
 // HandleUserTcpConnection is used for incoming tcp user connections.
 // HandleUserTcpConnection is used for incoming tcp user connections.
 // It can be used for tcp, http, https type.
 // It can be used for tcp, http, https type.
-func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, statsCollector stats.Collector, serverCfg config.ServerCommonConf) {
+func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, serverCfg config.ServerCommonConf) {
 	xl := xlog.FromContextSafe(pxy.Context())
 	xl := xlog.FromContextSafe(pxy.Context())
 	defer userConn.Close()
 	defer userConn.Close()
 
 
@@ -237,17 +241,13 @@ func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, statsCollector stats.
 	xl.Debug("join connections, workConn(l[%s] r[%s]) userConn(l[%s] r[%s])", workConn.LocalAddr().String(),
 	xl.Debug("join connections, workConn(l[%s] r[%s]) userConn(l[%s] r[%s])", workConn.LocalAddr().String(),
 		workConn.RemoteAddr().String(), userConn.LocalAddr().String(), userConn.RemoteAddr().String())
 		workConn.RemoteAddr().String(), userConn.LocalAddr().String(), userConn.RemoteAddr().String())
 
 
-	statsCollector.Mark(stats.TypeOpenConnection, &stats.OpenConnectionPayload{ProxyName: pxy.GetName()})
+	name := pxy.GetName()
+	proxyType := pxy.GetConf().GetBaseInfo().ProxyType
+	metrics.Server.OpenConnection(name, proxyType)
 	inCount, outCount := frpIo.Join(local, userConn)
 	inCount, outCount := frpIo.Join(local, userConn)
-	statsCollector.Mark(stats.TypeCloseConnection, &stats.CloseConnectionPayload{ProxyName: pxy.GetName()})
-	statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
-		ProxyName:    pxy.GetName(),
-		TrafficBytes: inCount,
-	})
-	statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
-		ProxyName:    pxy.GetName(),
-		TrafficBytes: outCount,
-	})
+	metrics.Server.CloseConnection(name, proxyType)
+	metrics.Server.AddTrafficIn(name, proxyType, inCount)
+	metrics.Server.AddTrafficOut(name, proxyType, outCount)
 	xl.Debug("join connections closed")
 	xl.Debug("join connections closed")
 }
 }
 
 

+ 95 - 0
server/proxy/tcpmux.go

@@ -0,0 +1,95 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxy
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/fatedier/frp/models/config"
+	"github.com/fatedier/frp/models/consts"
+	"github.com/fatedier/frp/utils/util"
+	"github.com/fatedier/frp/utils/vhost"
+)
+
+type TcpMuxProxy struct {
+	*BaseProxy
+	cfg *config.TcpMuxProxyConf
+
+	realPort int
+}
+
+func (pxy *TcpMuxProxy) httpConnectListen(domain string, addrs []string) ([]string, error) {
+	routeConfig := &vhost.VhostRouteConfig{
+		Domain: domain,
+	}
+	l, err := pxy.rc.TcpMuxHttpConnectMuxer.Listen(pxy.ctx, routeConfig)
+	if err != nil {
+		return nil, err
+	}
+	pxy.xl.Info("tcpmux httpconnect multiplexer listens for host [%s]", routeConfig.Domain)
+	pxy.listeners = append(pxy.listeners, l)
+	return append(addrs, util.CanonicalAddr(routeConfig.Domain, pxy.serverCfg.TcpMuxHttpConnectPort)), nil
+}
+
+func (pxy *TcpMuxProxy) httpConnectRun() (remoteAddr string, err error) {
+	addrs := make([]string, 0)
+	for _, domain := range pxy.cfg.CustomDomains {
+		if domain == "" {
+			continue
+		}
+
+		addrs, err = pxy.httpConnectListen(domain, addrs)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if pxy.cfg.SubDomain != "" {
+		addrs, err = pxy.httpConnectListen(pxy.cfg.SubDomain+"."+pxy.serverCfg.SubDomainHost, addrs)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	pxy.startListenHandler(pxy, HandleUserTcpConnection)
+	remoteAddr = strings.Join(addrs, ",")
+	return remoteAddr, err
+}
+
+func (pxy *TcpMuxProxy) Run() (remoteAddr string, err error) {
+	switch pxy.cfg.Multiplexer {
+	case consts.HttpConnectTcpMultiplexer:
+		remoteAddr, err = pxy.httpConnectRun()
+	default:
+		err = fmt.Errorf("unknown multiplexer [%s]", pxy.cfg.Multiplexer)
+	}
+
+	if err != nil {
+		pxy.Close()
+	}
+	return remoteAddr, err
+}
+
+func (pxy *TcpMuxProxy) GetConf() config.ProxyConf {
+	return pxy.cfg
+}
+
+func (pxy *TcpMuxProxy) Close() {
+	pxy.BaseProxy.Close()
+	if pxy.cfg.Group == "" {
+		pxy.rc.TcpPortManager.Release(pxy.realPort)
+	}
+}
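
On the wire this new proxy type rides on the tcpmux_httpconnect_port listener: a consumer opens a plain TCP connection to that port and sends an HTTP CONNECT whose target selects the proxy by custom domain or subdomain (the test config further down in this changeset uses domain tunnel1 on port 10806). A hedged sketch of that handshake in Go, without the frp helper packages; address and domain are taken from those test values and are otherwise illustrative:

package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:10806")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// HTTP CONNECT selects the tunnel; the muxer answers 200 OK and from then
	// on the connection behaves like a raw TCP pipe to the proxied service.
	fmt.Fprintf(conn, "CONNECT tunnel1 HTTP/1.1\r\nHost: tunnel1\r\n\r\n")

	status, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Println("muxer replied:", status)
	// Continue reading and writing application bytes on conn as usual.
}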

+ 11 - 9
server/proxy/udp.go

@@ -23,7 +23,7 @@ import (
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/proto/udp"
 	"github.com/fatedier/frp/models/proto/udp"
-	"github.com/fatedier/frp/server/stats"
+	"github.com/fatedier/frp/server/metrics"
 
 
 	"github.com/fatedier/golib/errors"
 	"github.com/fatedier/golib/errors"
 )
 )
@@ -114,10 +114,11 @@ func (pxy *UdpProxy) Run() (remoteAddr string, err error) {
 				if errRet := errors.PanicToError(func() {
 				if errRet := errors.PanicToError(func() {
 					xl.Trace("get udp message from workConn: %s", m.Content)
 					xl.Trace("get udp message from workConn: %s", m.Content)
 					pxy.readCh <- m
 					pxy.readCh <- m
-					pxy.statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
-						ProxyName:    pxy.GetName(),
-						TrafficBytes: int64(len(m.Content)),
-					})
+					metrics.Server.AddTrafficOut(
+						pxy.GetName(),
+						pxy.GetConf().GetBaseInfo().ProxyType,
+						int64(len(m.Content)),
+					)
 				}); errRet != nil {
 				}); errRet != nil {
 					conn.Close()
 					conn.Close()
 					xl.Info("reader goroutine for udp work connection closed")
 					xl.Info("reader goroutine for udp work connection closed")
@@ -143,10 +144,11 @@ func (pxy *UdpProxy) Run() (remoteAddr string, err error) {
 					return
 					return
 				} else {
 				} else {
 					xl.Trace("send message to udp workConn: %s", udpMsg.Content)
 					xl.Trace("send message to udp workConn: %s", udpMsg.Content)
-					pxy.statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
-						ProxyName:    pxy.GetName(),
-						TrafficBytes: int64(len(udpMsg.Content)),
-					})
+					metrics.Server.AddTrafficIn(
+						pxy.GetName(),
+						pxy.GetConf().GetBaseInfo().ProxyType,
+						int64(len(udpMsg.Content)),
+					)
 					continue
 					continue
 				}
 				}
 			case <-ctx.Done():
 			case <-ctx.Done():

+ 53 - 20
server/service.go

@@ -30,17 +30,20 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/fatedier/frp/assets"
 	"github.com/fatedier/frp/assets"
+	"github.com/fatedier/frp/models/auth"
 	"github.com/fatedier/frp/models/config"
 	"github.com/fatedier/frp/models/config"
+	modelmetrics "github.com/fatedier/frp/models/metrics"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/msg"
 	"github.com/fatedier/frp/models/nathole"
 	"github.com/fatedier/frp/models/nathole"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	plugin "github.com/fatedier/frp/models/plugin/server"
 	"github.com/fatedier/frp/server/controller"
 	"github.com/fatedier/frp/server/controller"
 	"github.com/fatedier/frp/server/group"
 	"github.com/fatedier/frp/server/group"
+	"github.com/fatedier/frp/server/metrics"
 	"github.com/fatedier/frp/server/ports"
 	"github.com/fatedier/frp/server/ports"
 	"github.com/fatedier/frp/server/proxy"
 	"github.com/fatedier/frp/server/proxy"
-	"github.com/fatedier/frp/server/stats"
 	"github.com/fatedier/frp/utils/log"
 	"github.com/fatedier/frp/utils/log"
 	frpNet "github.com/fatedier/frp/utils/net"
 	frpNet "github.com/fatedier/frp/utils/net"
+	"github.com/fatedier/frp/utils/tcpmux"
 	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/util"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/version"
 	"github.com/fatedier/frp/utils/vhost"
 	"github.com/fatedier/frp/utils/vhost"
@@ -51,7 +54,8 @@ import (
 )
 )
 
 
 const (
 const (
-	connReadTimeout time.Duration = 10 * time.Second
+	connReadTimeout       time.Duration = 10 * time.Second
+	vhostReadWriteTimeout time.Duration = 30 * time.Second
 )
 )
 
 
 // Server service
 // Server service
@@ -86,8 +90,8 @@ type Service struct {
 	// All resource managers and controllers
 	// All resource managers and controllers
 	rc *controller.ResourceController
 	rc *controller.ResourceController
 
 
-	// stats collector to store server and proxies stats info
-	statsCollector stats.Collector
+	// Verifies authentication based on selected method
+	authVerifier auth.Verifier
 
 
 	tlsConfig *tls.Config
 	tlsConfig *tls.Config
 
 
@@ -105,6 +109,7 @@ func NewService(cfg config.ServerCommonConf) (svr *Service, err error) {
 			UdpPortManager: ports.NewPortManager("udp", cfg.ProxyBindAddr, cfg.AllowPorts),
 			UdpPortManager: ports.NewPortManager("udp", cfg.ProxyBindAddr, cfg.AllowPorts),
 		},
 		},
 		httpVhostRouter: vhost.NewVhostRouters(),
 		httpVhostRouter: vhost.NewVhostRouters(),
+		authVerifier:    auth.NewAuthVerifier(cfg.AuthServerConfig),
 		tlsConfig:       generateTLSConfig(),
 		tlsConfig:       generateTLSConfig(),
 		cfg:             cfg,
 		cfg:             cfg,
 	}
 	}
@@ -207,7 +212,7 @@ func NewService(cfg config.ServerCommonConf) (svr *Service, err error) {
 			}
 			}
 		}
 		}
 
 
-		svr.rc.VhostHttpsMuxer, err = vhost.NewHttpsMuxer(l, 30*time.Second)
+		svr.rc.VhostHttpsMuxer, err = vhost.NewHttpsMuxer(l, vhostReadWriteTimeout)
 		if err != nil {
 		if err != nil {
 			err = fmt.Errorf("Create vhost httpsMuxer error, %v", err)
 			err = fmt.Errorf("Create vhost httpsMuxer error, %v", err)
 			return
 			return
@@ -215,6 +220,23 @@ func NewService(cfg config.ServerCommonConf) (svr *Service, err error) {
 		log.Info("https service listen on %s:%d", cfg.ProxyBindAddr, cfg.VhostHttpsPort)
 		log.Info("https service listen on %s:%d", cfg.ProxyBindAddr, cfg.VhostHttpsPort)
 	}
 	}
 
 
+	// Create tcpmux httpconnect multiplexer.
+	if cfg.TcpMuxHttpConnectPort > 0 {
+		var l net.Listener
+		l, err = net.Listen("tcp", fmt.Sprintf("%s:%d", cfg.ProxyBindAddr, cfg.TcpMuxHttpConnectPort))
+		if err != nil {
+			err = fmt.Errorf("Create server listener error, %v", err)
+			return
+		}
+
+		svr.rc.TcpMuxHttpConnectMuxer, err = tcpmux.NewHttpConnectTcpMuxer(l, vhostReadWriteTimeout)
+		if err != nil {
+			err = fmt.Errorf("Create vhost tcpMuxer error, %v", err)
+			return
+		}
+		log.Info("tcpmux httpconnect multiplexer listen on %s:%d", cfg.ProxyBindAddr, cfg.TcpMuxHttpConnectPort)
+	}
+
 	// frp tls listener
 	// frp tls listener
 	svr.tlsListener = svr.muxer.Listen(1, 1, func(data []byte) bool {
 	svr.tlsListener = svr.muxer.Listen(1, 1, func(data []byte) bool {
 		return int(data[0]) == frpNet.FRP_TLS_HEAD_BYTE
 		return int(data[0]) == frpNet.FRP_TLS_HEAD_BYTE
@@ -251,8 +273,12 @@ func NewService(cfg config.ServerCommonConf) (svr *Service, err error) {
 		log.Info("Dashboard listen on %s:%d", cfg.DashboardAddr, cfg.DashboardPort)
 		log.Info("Dashboard listen on %s:%d", cfg.DashboardAddr, cfg.DashboardPort)
 		statsEnable = true
 		statsEnable = true
 	}
 	}
-
-	svr.statsCollector = stats.NewInternalCollector(statsEnable)
+	if statsEnable {
+		modelmetrics.EnableMem()
+		if cfg.EnablePrometheus {
+			modelmetrics.EnablePrometheus()
+		}
+	}
 	return
 	return
 }
 }
 
 
@@ -284,7 +310,7 @@ func (svr *Service) HandleListener(l net.Listener) {
 
 
 		log.Trace("start check TLS connection...")
 		log.Trace("start check TLS connection...")
 		originConn := c
 		originConn := c
-		c, err = frpNet.CheckAndEnableTLSServerConnWithTimeout(c, svr.tlsConfig, connReadTimeout)
+		c, err = frpNet.CheckAndEnableTLSServerConnWithTimeout(c, svr.tlsConfig, svr.cfg.TlsOnly, connReadTimeout)
 		if err != nil {
 		if err != nil {
 			log.Warn("CheckAndEnableTLSServerConnWithTimeout error: %v", err)
 			log.Warn("CheckAndEnableTLSServerConnWithTimeout error: %v", err)
 			originConn.Close()
 			originConn.Close()
@@ -322,18 +348,20 @@ func (svr *Service) HandleListener(l net.Listener) {
 						xl.Warn("register control error: %v", err)
 						xl.Warn("register control error: %v", err)
 						msg.WriteMsg(conn, &msg.LoginResp{
 						msg.WriteMsg(conn, &msg.LoginResp{
 							Version: version.Full(),
 							Version: version.Full(),
-							Error:   err.Error(),
+							Error:   util.GenerateResponseErrorString("register control error", err, svr.cfg.DetailedErrorsToClient),
 						})
 						})
 						conn.Close()
 						conn.Close()
 					}
 					}
 				case *msg.NewWorkConn:
 				case *msg.NewWorkConn:
-					svr.RegisterWorkConn(conn, m)
+					if err := svr.RegisterWorkConn(conn, m); err != nil {
+						conn.Close()
+					}
 				case *msg.NewVisitorConn:
 				case *msg.NewVisitorConn:
 					if err = svr.RegisterVisitorConn(conn, m); err != nil {
 					if err = svr.RegisterVisitorConn(conn, m); err != nil {
 						xl.Warn("register visitor conn error: %v", err)
 						xl.Warn("register visitor conn error: %v", err)
 						msg.WriteMsg(conn, &msg.NewVisitorConnResp{
 						msg.WriteMsg(conn, &msg.NewVisitorConnResp{
 							ProxyName: m.ProxyName,
 							ProxyName: m.ProxyName,
-							Error:     err.Error(),
+							Error:     util.GenerateResponseErrorString("register visitor conn error", err, svr.cfg.DetailedErrorsToClient),
 						})
 						})
 						conn.Close()
 						conn.Close()
 					} else {
 					} else {
@@ -399,13 +427,11 @@ func (svr *Service) RegisterControl(ctlConn net.Conn, loginMsg *msg.Login) (err
 	}
 	}
 
 
 	// Check auth.
 	// Check auth.
-	if util.GetAuthKey(svr.cfg.Token, loginMsg.Timestamp) != loginMsg.PrivilegeKey {
-		err = fmt.Errorf("authorization failed")
+	if err = svr.authVerifier.VerifyLogin(loginMsg); err != nil {
 		return
 		return
 	}
 	}
 
 
-	ctl := NewControl(ctx, svr.rc, svr.pxyManager, svr.pluginManager, svr.statsCollector, ctlConn, loginMsg, svr.cfg)
-
+	ctl := NewControl(ctx, svr.rc, svr.pxyManager, svr.pluginManager, svr.authVerifier, ctlConn, loginMsg, svr.cfg)
 	if oldCtl := svr.ctlManager.Add(loginMsg.RunId, ctl); oldCtl != nil {
 	if oldCtl := svr.ctlManager.Add(loginMsg.RunId, ctl); oldCtl != nil {
 		oldCtl.allShutdown.WaitDone()
 		oldCtl.allShutdown.WaitDone()
 	}
 	}
@@ -413,7 +439,7 @@ func (svr *Service) RegisterControl(ctlConn net.Conn, loginMsg *msg.Login) (err
 	ctl.Start()
 	ctl.Start()
 
 
 	// for statistics
 	// for statistics
-	svr.statsCollector.Mark(stats.TypeNewClient, &stats.NewClientPayload{})
+	metrics.Server.NewClient()
 
 
 	go func() {
 	go func() {
 		// block until control closed
 		// block until control closed
@@ -424,15 +450,22 @@ func (svr *Service) RegisterControl(ctlConn net.Conn, loginMsg *msg.Login) (err
 }
 }
 
 
 // RegisterWorkConn register a new work connection to control and proxies need it.
 // RegisterWorkConn register a new work connection to control and proxies need it.
-func (svr *Service) RegisterWorkConn(workConn net.Conn, newMsg *msg.NewWorkConn) {
+func (svr *Service) RegisterWorkConn(workConn net.Conn, newMsg *msg.NewWorkConn) error {
 	xl := frpNet.NewLogFromConn(workConn)
 	xl := frpNet.NewLogFromConn(workConn)
 	ctl, exist := svr.ctlManager.GetById(newMsg.RunId)
 	ctl, exist := svr.ctlManager.GetById(newMsg.RunId)
 	if !exist {
 	if !exist {
 		xl.Warn("No client control found for run id [%s]", newMsg.RunId)
 		xl.Warn("No client control found for run id [%s]", newMsg.RunId)
-		return
+		return fmt.Errorf("no client control found for run id [%s]", newMsg.RunId)
 	}
 	}
-	ctl.RegisterWorkConn(workConn)
-	return
+	// Check auth.
+	if err := svr.authVerifier.VerifyNewWorkConn(newMsg); err != nil {
+		xl.Warn("Invalid authentication in NewWorkConn message on run id [%s]", newMsg.RunId)
+		msg.WriteMsg(workConn, &msg.StartWorkConn{
+			Error: "invalid authentication in NewWorkConn",
+		})
+		return fmt.Errorf("invalid authentication in NewWorkConn message on run id [%s]", newMsg.RunId)
+	}
+	return ctl.RegisterWorkConn(workConn)
 }
 }
 
 
 func (svr *Service) RegisterVisitorConn(visitorConn net.Conn, newMsg *msg.NewVisitorConn) error {
 func (svr *Service) RegisterVisitorConn(visitorConn net.Conn, newMsg *msg.NewVisitorConn) error {

+ 0 - 277
server/stats/internal.go

@@ -1,277 +0,0 @@
-// Copyright 2019 fatedier, fatedier@gmail.com
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
-	"sync"
-	"time"
-
-	"github.com/fatedier/frp/utils/log"
-	"github.com/fatedier/frp/utils/metric"
-)
-
-type internalCollector struct {
-	enable bool
-	info   *ServerStatistics
-	mu     sync.Mutex
-}
-
-func NewInternalCollector(enable bool) Collector {
-	return &internalCollector{
-		enable: enable,
-		info: &ServerStatistics{
-			TotalTrafficIn:  metric.NewDateCounter(ReserveDays),
-			TotalTrafficOut: metric.NewDateCounter(ReserveDays),
-			CurConns:        metric.NewCounter(),
-
-			ClientCounts:    metric.NewCounter(),
-			ProxyTypeCounts: make(map[string]metric.Counter),
-
-			ProxyStatistics: make(map[string]*ProxyStatistics),
-		},
-	}
-}
-
-func (collector *internalCollector) Run() error {
-	go func() {
-		for {
-			time.Sleep(12 * time.Hour)
-			log.Debug("start to clear useless proxy statistics data...")
-			collector.ClearUselessInfo()
-			log.Debug("finish to clear useless proxy statistics data")
-		}
-	}()
-	return nil
-}
-
-func (collector *internalCollector) ClearUselessInfo() {
-	// To check if there are proxies that closed than 7 days and drop them.
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	for name, data := range collector.info.ProxyStatistics {
-		if !data.LastCloseTime.IsZero() && time.Since(data.LastCloseTime) > time.Duration(7*24)*time.Hour {
-			delete(collector.info.ProxyStatistics, name)
-			log.Trace("clear proxy [%s]'s statistics data, lastCloseTime: [%s]", name, data.LastCloseTime.String())
-		}
-	}
-}
-
-func (collector *internalCollector) Mark(statsType StatsType, payload interface{}) {
-	if !collector.enable {
-		return
-	}
-
-	switch v := payload.(type) {
-	case *NewClientPayload:
-		collector.newClient(v)
-	case *CloseClientPayload:
-		collector.closeClient(v)
-	case *NewProxyPayload:
-		collector.newProxy(v)
-	case *CloseProxyPayload:
-		collector.closeProxy(v)
-	case *OpenConnectionPayload:
-		collector.openConnection(v)
-	case *CloseConnectionPayload:
-		collector.closeConnection(v)
-	case *AddTrafficInPayload:
-		collector.addTrafficIn(v)
-	case *AddTrafficOutPayload:
-		collector.addTrafficOut(v)
-	}
-}
-
-func (collector *internalCollector) newClient(payload *NewClientPayload) {
-	collector.info.ClientCounts.Inc(1)
-}
-
-func (collector *internalCollector) closeClient(payload *CloseClientPayload) {
-	collector.info.ClientCounts.Dec(1)
-}
-
-func (collector *internalCollector) newProxy(payload *NewProxyPayload) {
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	counter, ok := collector.info.ProxyTypeCounts[payload.ProxyType]
-	if !ok {
-		counter = metric.NewCounter()
-	}
-	counter.Inc(1)
-	collector.info.ProxyTypeCounts[payload.ProxyType] = counter
-
-	proxyStats, ok := collector.info.ProxyStatistics[payload.Name]
-	if !(ok && proxyStats.ProxyType == payload.ProxyType) {
-		proxyStats = &ProxyStatistics{
-			Name:       payload.Name,
-			ProxyType:  payload.ProxyType,
-			CurConns:   metric.NewCounter(),
-			TrafficIn:  metric.NewDateCounter(ReserveDays),
-			TrafficOut: metric.NewDateCounter(ReserveDays),
-		}
-		collector.info.ProxyStatistics[payload.Name] = proxyStats
-	}
-	proxyStats.LastStartTime = time.Now()
-}
-
-func (collector *internalCollector) closeProxy(payload *CloseProxyPayload) {
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	if counter, ok := collector.info.ProxyTypeCounts[payload.ProxyType]; ok {
-		counter.Dec(1)
-	}
-	if proxyStats, ok := collector.info.ProxyStatistics[payload.Name]; ok {
-		proxyStats.LastCloseTime = time.Now()
-	}
-}
-
-func (collector *internalCollector) openConnection(payload *OpenConnectionPayload) {
-	collector.info.CurConns.Inc(1)
-
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
-	if ok {
-		proxyStats.CurConns.Inc(1)
-		collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
-	}
-}
-
-func (collector *internalCollector) closeConnection(payload *CloseConnectionPayload) {
-	collector.info.CurConns.Dec(1)
-
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
-	if ok {
-		proxyStats.CurConns.Dec(1)
-		collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
-	}
-}
-
-func (collector *internalCollector) addTrafficIn(payload *AddTrafficInPayload) {
-	collector.info.TotalTrafficIn.Inc(payload.TrafficBytes)
-
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-
-	proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
-	if ok {
-		proxyStats.TrafficIn.Inc(payload.TrafficBytes)
-		collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
-	}
-}
-
-func (collector *internalCollector) addTrafficOut(payload *AddTrafficOutPayload) {
-	collector.info.TotalTrafficOut.Inc(payload.TrafficBytes)
-
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-
-	proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
-	if ok {
-		proxyStats.TrafficOut.Inc(payload.TrafficBytes)
-		collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
-	}
-}
-
-func (collector *internalCollector) GetServer() *ServerStats {
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-	s := &ServerStats{
-		TotalTrafficIn:  collector.info.TotalTrafficIn.TodayCount(),
-		TotalTrafficOut: collector.info.TotalTrafficOut.TodayCount(),
-		CurConns:        collector.info.CurConns.Count(),
-		ClientCounts:    collector.info.ClientCounts.Count(),
-		ProxyTypeCounts: make(map[string]int64),
-	}
-	for k, v := range collector.info.ProxyTypeCounts {
-		s.ProxyTypeCounts[k] = v.Count()
-	}
-	return s
-}
-
-func (collector *internalCollector) GetProxiesByType(proxyType string) []*ProxyStats {
-	res := make([]*ProxyStats, 0)
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-
-	for name, proxyStats := range collector.info.ProxyStatistics {
-		if proxyStats.ProxyType != proxyType {
-			continue
-		}
-
-		ps := &ProxyStats{
-			Name:            name,
-			Type:            proxyStats.ProxyType,
-			TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
-			TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
-			CurConns:        proxyStats.CurConns.Count(),
-		}
-		if !proxyStats.LastStartTime.IsZero() {
-			ps.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
-		}
-		if !proxyStats.LastCloseTime.IsZero() {
-			ps.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
-		}
-		res = append(res, ps)
-	}
-	return res
-}
-
-func (collector *internalCollector) GetProxiesByTypeAndName(proxyType string, proxyName string) (res *ProxyStats) {
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-
-	for name, proxyStats := range collector.info.ProxyStatistics {
-		if proxyStats.ProxyType != proxyType {
-			continue
-		}
-
-		if name != proxyName {
-			continue
-		}
-
-		res = &ProxyStats{
-			Name:            name,
-			Type:            proxyStats.ProxyType,
-			TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
-			TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
-			CurConns:        proxyStats.CurConns.Count(),
-		}
-		if !proxyStats.LastStartTime.IsZero() {
-			res.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
-		}
-		if !proxyStats.LastCloseTime.IsZero() {
-			res.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
-		}
-		break
-	}
-	return
-}
-
-func (collector *internalCollector) GetProxyTraffic(name string) (res *ProxyTrafficInfo) {
-	collector.mu.Lock()
-	defer collector.mu.Unlock()
-
-	proxyStats, ok := collector.info.ProxyStatistics[name]
-	if ok {
-		res = &ProxyTrafficInfo{
-			Name: name,
-		}
-		res.TrafficIn = proxyStats.TrafficIn.GetLastDaysCount(ReserveDays)
-		res.TrafficOut = proxyStats.TrafficOut.GetLastDaysCount(ReserveDays)
-	}
-	return
-}

+ 72 - 0
tests/ci/auth_test.go

@@ -0,0 +1,72 @@
+package ci
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/fatedier/frp/tests/config"
+	"github.com/fatedier/frp/tests/consts"
+	"github.com/fatedier/frp/tests/util"
+
+	"github.com/stretchr/testify/assert"
+)
+
+const FRPS_TOKEN_TCP_CONF = `
+[common]
+bind_addr = 0.0.0.0
+bind_port = 20000
+log_file = console
+log_level = debug
+authentication_method = token
+token = 123456
+`
+
+const FRPC_TOKEN_TCP_CONF = `
+[common]
+server_addr = 127.0.0.1
+server_port = 20000
+log_file = console
+log_level = debug
+authentication_method = token
+token = 123456
+protocol = tcp
+
+[tcp]
+type = tcp
+local_port = 10701
+remote_port = 20801
+`
+
+func TestTcpWithTokenAuthentication(t *testing.T) {
+	assert := assert.New(t)
+	frpsCfgPath, err := config.GenerateConfigFile(consts.FRPS_NORMAL_CONFIG, FRPS_TOKEN_TCP_CONF)
+	if assert.NoError(err) {
+		defer os.Remove(frpsCfgPath)
+	}
+
+	frpcCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TOKEN_TCP_CONF)
+	if assert.NoError(err) {
+		defer os.Remove(frpcCfgPath)
+	}
+
+	frpsProcess := util.NewProcess(consts.FRPS_BIN_PATH, []string{"-c", frpsCfgPath})
+	err = frpsProcess.Start()
+	if assert.NoError(err) {
+		defer frpsProcess.Stop()
+	}
+
+	time.Sleep(200 * time.Millisecond)
+
+	frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
+	err = frpcProcess.Start()
+	if assert.NoError(err) {
+		defer frpcProcess.Stop()
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	// test tcp
+	res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
+	assert.NoError(err)
+	assert.Equal(consts.TEST_TCP_ECHO_STR, res)
+}

+ 7 - 0
tests/ci/auto_test_frpc.ini

@@ -126,6 +126,13 @@ custom_domains = test6.frp.com
 host_header_rewrite = test6.frp.com
 header_X-From-Where = frp

+[tcpmuxhttpconnect]
+type = tcpmux
+multiplexer = httpconnect
+local_ip = 127.0.0.1
+local_port = 10701
+custom_domains = tunnel1
+
 [wildcard_http]
 type = http
 local_ip = 127.0.0.1

+ 1 - 0
tests/ci/auto_test_frps.ini

@@ -2,6 +2,7 @@
 bind_addr = 0.0.0.0
 bind_port = 10700
 vhost_http_port = 10804
+tcpmux_httpconnect_port = 10806
 log_level = trace
 token = 123456
 allow_ports = 10000-20000,20002,30000-50000

+ 11 - 0
tests/ci/normal_test.go

@@ -212,6 +212,17 @@ func TestHttp(t *testing.T) {
 	}
 }

+func TestTcpMux(t *testing.T) {
+	assert := assert.New(t)
+
+	conn, err := gnet.DialTcpByProxy(fmt.Sprintf("http://%s:%d", "127.0.0.1", consts.TEST_TCP_MUX_FRP_PORT), "tunnel1")
+	if assert.NoError(err) {
+		res, err := util.SendTcpMsgByConn(conn, consts.TEST_TCP_ECHO_STR)
+		assert.NoError(err)
+		assert.Equal(consts.TEST_TCP_ECHO_STR, res)
+	}
+}
+
 func TestWebSocket(t *testing.T) {
 	assert := assert.New(t)


+ 92 - 0
tests/ci/tls_test.go

@@ -186,3 +186,95 @@ func TestTLSOverWebsocket(t *testing.T) {
 	assert.NoError(err)
 	assert.Equal(consts.TEST_TCP_ECHO_STR, res)
 }
+
+const FRPS_TLS_ONLY_TCP_CONF = `
+[common]
+bind_addr = 0.0.0.0
+bind_port = 20000
+log_file = console
+log_level = debug
+token = 123456
+tls_only = true
+`
+
+const FRPC_TLS_ONLY_TCP_CONF = `
+[common]
+server_addr = 127.0.0.1
+server_port = 20000
+log_file = console
+log_level = debug
+token = 123456
+protocol = tcp
+tls_enable = true
+
+[tcp]
+type = tcp
+local_port = 10701
+remote_port = 20801
+`
+
+const FRPC_TLS_ONLY_NO_TLS_TCP_CONF = `
+[common]
+server_addr = 127.0.0.1
+server_port = 20000
+log_file = console
+log_level = debug
+token = 123456
+protocol = tcp
+tls_enable = false
+
+[tcp]
+type = tcp
+local_port = 10701
+remote_port = 20802
+`
+
+func TestTlsOnlyOverTCP(t *testing.T) {
+	assert := assert.New(t)
+	frpsCfgPath, err := config.GenerateConfigFile(consts.FRPS_NORMAL_CONFIG, FRPS_TLS_ONLY_TCP_CONF)
+	if assert.NoError(err) {
+		defer os.Remove(frpsCfgPath)
+	}
+
+	frpcWithTlsCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TLS_ONLY_TCP_CONF)
+	if assert.NoError(err) {
+		defer os.Remove(frpcWithTlsCfgPath)
+	}
+
+	frpsProcess := util.NewProcess(consts.FRPS_BIN_PATH, []string{"-c", frpsCfgPath})
+	err = frpsProcess.Start()
+	if assert.NoError(err) {
+		defer frpsProcess.Stop()
+	}
+
+	time.Sleep(200 * time.Millisecond)
+
+	frpcProcessWithTls := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcWithTlsCfgPath})
+	err = frpcProcessWithTls.Start()
+	if assert.NoError(err) {
+		defer frpcProcessWithTls.Stop()
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	// test tcp over tls
+	res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
+	assert.NoError(err)
+	assert.Equal(consts.TEST_TCP_ECHO_STR, res)
+	frpcProcessWithTls.Stop()
+
+	frpcWithoutTlsCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TLS_ONLY_NO_TLS_TCP_CONF)
+	if assert.NoError(err) {
+		defer os.Remove(frpcWithTlsCfgPath)
+	}
+
+	frpcProcessWithoutTls := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcWithoutTlsCfgPath})
+	err = frpcProcessWithoutTls.Start()
+	if assert.NoError(err) {
+		defer frpcProcessWithoutTls.Stop()
+	}
+	time.Sleep(500 * time.Millisecond)
+
+	// test tcp without tls
+	_, err = util.SendTcpMsg("127.0.0.1:20802", consts.TEST_TCP_ECHO_STR)
+	assert.Error(err)
+}

+ 2 - 0
tests/consts/consts.go

@@ -40,6 +40,8 @@ var (
 	TEST_HTTP_FOO_STR    string = "http foo string: " + TEST_STR
 	TEST_HTTP_BAR_STR    string = "http bar string: " + TEST_STR

+	TEST_TCP_MUX_FRP_PORT int = 10806
+
 	TEST_STCP_FRP_PORT    int    = 10805
 	TEST_STCP_EC_FRP_PORT int    = 10905
 	TEST_STCP_ECHO_STR    string = "stcp type:" + TEST_STR

+ 17 - 13
vendor/github.com/fatedier/golib/errors/errors.go → utils/metric/metrics.go

@@ -1,4 +1,4 @@
-// Copyright 2018 fatedier, fatedier@gmail.com
+// Copyright 2020 fatedier, fatedier@gmail.com
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,19 +12,23 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-package errors
+package metric

-import (
-	"fmt"
-)
+// GaugeMetric represents a single numerical value that can arbitrarily go up
+// and down.
+type GaugeMetric interface {
+	Inc()
+	Dec()
+	Set(float64)
+}

-func PanicToError(fn func()) (err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = fmt.Errorf("Panic error: %v", r)
-		}
-	}()
+// CounterMetric represents a single numerical value that only ever
+// goes up.
+type CounterMetric interface {
+	Inc()
+}

-	fn()
-	return
+// HistogramMetric counts individual observations.
+type HistogramMetric interface {
+	Observe(float64)
 }
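
These interfaces describe the minimal surface the metrics code needs from a backend, and the Prometheus client types already provide supersets of it. A compile-time assertion makes that concrete (a sketch; the demo metric names are invented):

package main

import (
	"fmt"

	"github.com/fatedier/frp/utils/metric"
	"github.com/prometheus/client_golang/prometheus"
)

// Compile-time checks: prometheus.Counter offers Inc, prometheus.Gauge offers
// Inc/Dec/Set, and prometheus.Histogram offers Observe, so each satisfies the
// corresponding interface above.
var (
	_ metric.CounterMetric   = prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total"})
	_ metric.GaugeMetric     = prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_gauge"})
	_ metric.HistogramMetric = prometheus.NewHistogram(prometheus.HistogramOpts{Name: "demo_seconds"})
)

func main() {
	fmt.Println("interfaces satisfied")
}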

+ 6 - 1
utils/net/tls.go

@@ -16,6 +16,7 @@ package net

 import (
 	"crypto/tls"
+	"fmt"
 	"net"
 	"time"

@@ -32,7 +33,7 @@ func WrapTLSClientConn(c net.Conn, tlsConfig *tls.Config) (out net.Conn) {
 	return
 }

-func CheckAndEnableTLSServerConnWithTimeout(c net.Conn, tlsConfig *tls.Config, timeout time.Duration) (out net.Conn, err error) {
+func CheckAndEnableTLSServerConnWithTimeout(c net.Conn, tlsConfig *tls.Config, tlsOnly bool, timeout time.Duration) (out net.Conn, err error) {
 	sc, r := gnet.NewSharedConnSize(c, 2)
 	buf := make([]byte, 1)
 	var n int
@@ -46,6 +47,10 @@ func CheckAndEnableTLSServerConnWithTimeout(c net.Conn, tlsConfig *tls.Config, t
 	if n == 1 && int(buf[0]) == FRP_TLS_HEAD_BYTE {
 		out = tls.Server(c, tlsConfig)
 	} else {
+		if tlsOnly {
+			err = fmt.Errorf("non-TLS connection received on a TlsOnly server")
+			return
+		}
 		out = sc
 	}
 	return

+ 68 - 0
utils/tcpmux/httpconnect.go

@@ -0,0 +1,68 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcpmux
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/fatedier/frp/utils/util"
+	"github.com/fatedier/frp/utils/vhost"
+)
+
+type HttpConnectTcpMuxer struct {
+	*vhost.VhostMuxer
+}
+
+func NewHttpConnectTcpMuxer(listener net.Listener, timeout time.Duration) (*HttpConnectTcpMuxer, error) {
+	mux, err := vhost.NewVhostMuxer(listener, getHostFromHttpConnect, nil, sendHttpOk, nil, timeout)
+	return &HttpConnectTcpMuxer{mux}, err
+}
+
+func readHttpConnectRequest(rd io.Reader) (host string, err error) {
+	bufioReader := bufio.NewReader(rd)
+
+	req, err := http.ReadRequest(bufioReader)
+	if err != nil {
+		return
+	}
+
+	if req.Method != "CONNECT" {
+		err = fmt.Errorf("connections to tcp vhost must be of method CONNECT")
+		return
+	}
+
+	host = util.GetHostFromAddr(req.Host)
+	return
+}
+
+func sendHttpOk(c net.Conn) error {
+	return util.OkResponse().Write(c)
+}
+
+func getHostFromHttpConnect(c net.Conn) (_ net.Conn, _ map[string]string, err error) {
+	reqInfoMap := make(map[string]string, 0)
+	host, err := readHttpConnectRequest(c)
+	if err != nil {
+		return nil, reqInfoMap, err
+	}
+	reqInfoMap["Host"] = host
+	reqInfoMap["Scheme"] = "tcp"
+	return c, reqInfoMap, nil
+}

+ 44 - 0
utils/util/http.go

@@ -0,0 +1,44 @@
+// Copyright 2020 guylewin, guy@lewin.co.il
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"net/http"
+	"strings"
+)
+
+func OkResponse() *http.Response {
+	header := make(http.Header)
+
+	res := &http.Response{
+		Status:     "OK",
+		StatusCode: 200,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     header,
+	}
+	return res
+}
+
+func GetHostFromAddr(addr string) (host string) {
+	strs := strings.Split(addr, ":")
+	if len(strs) > 1 {
+		host = strs[0]
+	} else {
+		host = addr
+	}
+	return
+}

+ 8 - 0
utils/util/util.go

@@ -101,3 +101,11 @@ func ParseRangeNumbers(rangeStr string) (numbers []int64, err error) {
 	}
 	return
 }
+
+func GenerateResponseErrorString(summary string, err error, detailed bool) string {
+	if detailed {
+		return err.Error()
+	} else {
+		return summary
+	}
+}

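`GenerateResponseErrorString` lets API handlers return either a generic summary or the full error text to clients. A hedged sketch of the intended use; the `detailed` flag here stands in for whatever server-side option drives it (an assumption, not confirmed by this hunk).

```go
// Sketch of hiding internal error details from clients unless the operator
// opts in. The function mirrors util.GenerateResponseErrorString above.
package main

import (
	"errors"
	"fmt"
)

func generateResponseErrorString(summary string, err error, detailed bool) string {
	if detailed {
		return err.Error()
	}
	return summary
}

func main() {
	err := errors.New("dial tcp 10.0.0.5:80: connect: connection refused")

	fmt.Println(generateResponseErrorString("new proxy error", err, false))
	// -> new proxy error
	fmt.Println(generateResponseErrorString("new proxy error", err, true))
	// -> dial tcp 10.0.0.5:80: connect: connection refused
}
```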
+ 1 - 1
utils/version/version.go

@@ -19,7 +19,7 @@ import (
 	"strings"
 )
 
-var version string = "0.31.2"
+var version string = "0.32.0"
 
 func Full() string {
 	return version

+ 4 - 14
utils/vhost/http.go

@@ -26,6 +26,7 @@ import (
 	"time"
 
 	frpLog "github.com/fatedier/frp/utils/log"
+	"github.com/fatedier/frp/utils/util"
 
 	"github.com/fatedier/golib/pool"
 )
@@ -34,16 +35,6 @@ var (
 	ErrNoDomain = errors.New("no such domain")
 )
 
-func getHostFromAddr(addr string) (host string) {
-	strs := strings.Split(addr, ":")
-	if len(strs) > 1 {
-		host = strs[0]
-	} else {
-		host = addr
-	}
-	return
-}
-
 type HttpReverseProxyOptions struct {
 	ResponseHeaderTimeoutS int64
 }
@@ -67,7 +58,7 @@ func NewHttpReverseProxy(option HttpReverseProxyOptions, vhostRouter *VhostRoute
 		Director: func(req *http.Request) {
 			req.URL.Scheme = "http"
 			url := req.Context().Value("url").(string)
-			oldHost := getHostFromAddr(req.Context().Value("host").(string))
+			oldHost := util.GetHostFromAddr(req.Context().Value("host").(string))
 			host := rp.GetRealHost(oldHost, url)
 			if host != "" {
 				req.Host = host
@@ -84,7 +75,7 @@ func NewHttpReverseProxy(option HttpReverseProxyOptions, vhostRouter *VhostRoute
 			DisableKeepAlives:     true,
 			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
 				url := ctx.Value("url").(string)
-				host := getHostFromAddr(ctx.Value("host").(string))
+				host := util.GetHostFromAddr(ctx.Value("host").(string))
 				remote := ctx.Value("remote").(string)
 				return rp.CreateConnection(host, url, remote)
 			},
@@ -183,11 +174,10 @@ func (rp *HttpReverseProxy) getVhost(domain string, location string) (vr *VhostR
 		}
 		domainSplit = domainSplit[1:]
 	}
-	return
 }
 
 func (rp *HttpReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
-	domain := getHostFromAddr(req.Host)
+	domain := util.GetHostFromAddr(req.Host)
 	location := req.URL.Path
 	user, passwd, _ := req.BasicAuth()
 	if !rp.CheckAuth(domain, location, user, passwd) {

+ 1 - 1
utils/vhost/https.go

@@ -48,7 +48,7 @@ type HttpsMuxer struct {
 }
 
 func NewHttpsMuxer(listener net.Listener, timeout time.Duration) (*HttpsMuxer, error) {
-	mux, err := NewVhostMuxer(listener, GetHttpsHostname, nil, nil, timeout)
+	mux, err := NewVhostMuxer(listener, GetHttpsHostname, nil, nil, nil, timeout)
 	return &HttpsMuxer{mux}, err
 }
 

+ 12 - 2
utils/vhost/vhost.go

@@ -29,22 +29,25 @@ import (
 type muxFunc func(net.Conn) (net.Conn, map[string]string, error)
 type httpAuthFunc func(net.Conn, string, string, string) (bool, error)
 type hostRewriteFunc func(net.Conn, string) (net.Conn, error)
+type successFunc func(net.Conn) error
 
 type VhostMuxer struct {
 	listener       net.Listener
 	timeout        time.Duration
 	vhostFunc      muxFunc
 	authFunc       httpAuthFunc
+	successFunc    successFunc
 	rewriteFunc    hostRewriteFunc
 	registryRouter *VhostRouters
 }
 
-func NewVhostMuxer(listener net.Listener, vhostFunc muxFunc, authFunc httpAuthFunc, rewriteFunc hostRewriteFunc, timeout time.Duration) (mux *VhostMuxer, err error) {
+func NewVhostMuxer(listener net.Listener, vhostFunc muxFunc, authFunc httpAuthFunc, successFunc successFunc, rewriteFunc hostRewriteFunc, timeout time.Duration) (mux *VhostMuxer, err error) {
 	mux = &VhostMuxer{
 		listener:       listener,
 		timeout:        timeout,
 		vhostFunc:      vhostFunc,
 		authFunc:       authFunc,
+		successFunc:    successFunc,
 		rewriteFunc:    rewriteFunc,
 		registryRouter: NewVhostRouters(),
 	}
@@ -113,7 +116,6 @@ func (v *VhostMuxer) getListener(name, path string) (l *Listener, exist bool) {
 		}
 		domainSplit = domainSplit[1:]
 	}
-	return
 }
 
 func (v *VhostMuxer) run() {
@@ -149,7 +151,15 @@ func (v *VhostMuxer) handle(c net.Conn) {
 		c.Close()
 		return
 	}
+
 	xl := xlog.FromContextSafe(l.ctx)
+	if v.successFunc != nil {
+		if err := v.successFunc(c); err != nil {
+			xl.Info("success func failure on vhost connection: %v", err)
+			c.Close()
+			return
+		}
+	}
 
 	// if authFunc is exist and userName/password is set
 	// then verify user access

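`VhostMuxer` gains an optional `successFunc` that runs as soon as a connection has been routed and before any auth check. The HTTP CONNECT muxer added earlier in this changeset plugs `sendHttpOk` into that slot, while existing callers such as `NewHttpsMuxer` (previous hunk) simply pass `nil`. A minimal sketch of the hook, with assumed names rather than frp's actual types:

```go
// Sketch of the success-callback pattern introduced above: after routing, an
// optional callback runs first (e.g. to acknowledge a CONNECT), then the
// normal auth/handoff path continues. Names here are illustrative.
package vhostsketch

import (
	"fmt"
	"net"
)

type successFunc func(net.Conn) error

func handleRouted(c net.Conn, onSuccess successFunc) error {
	if onSuccess != nil {
		if err := onSuccess(c); err != nil {
			c.Close()
			return fmt.Errorf("success func failure on vhost connection: %v", err)
		}
	}
	// ...authFunc check and handoff to the routed listener would follow here.
	return nil
}
```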
+ 0 - 22
vendor/github.com/armon/go-socks5/.gitignore

@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe

+ 0 - 4
vendor/github.com/armon/go-socks5/.travis.yml

@@ -1,4 +0,0 @@
-language: go
-go:
-  - 1.1
-  - tip

+ 0 - 20
vendor/github.com/armon/go-socks5/LICENSE

@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Armon Dadgar
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 0 - 45
vendor/github.com/armon/go-socks5/README.md

@@ -1,45 +0,0 @@
-go-socks5 [![Build Status](https://travis-ci.org/armon/go-socks5.png)](https://travis-ci.org/armon/go-socks5)
-=========
-
-Provides the `socks5` package that implements a [SOCKS5 server](http://en.wikipedia.org/wiki/SOCKS).
-SOCKS (Secure Sockets) is used to route traffic between a client and server through
-an intermediate proxy layer. This can be used to bypass firewalls or NATs.
-
-Feature
-=======
-
-The package has the following features:
-* "No Auth" mode
-* User/Password authentication
-* Support for the CONNECT command
-* Rules to do granular filtering of commands
-* Custom DNS resolution
-* Unit tests
-
-TODO
-====
-
-The package still needs the following:
-* Support for the BIND command
-* Support for the ASSOCIATE command
-
-
-Example
-=======
-
-Below is a simple example of usage
-
-```go
-// Create a SOCKS5 server
-conf := &socks5.Config{}
-server, err := socks5.New(conf)
-if err != nil {
-  panic(err)
-}
-
-// Create SOCKS5 proxy on localhost port 8000
-if err := server.ListenAndServe("tcp", "127.0.0.1:8000"); err != nil {
-  panic(err)
-}
-```
-

+ 0 - 151
vendor/github.com/armon/go-socks5/auth.go

@@ -1,151 +0,0 @@
-package socks5
-
-import (
-	"fmt"
-	"io"
-)
-
-const (
-	NoAuth          = uint8(0)
-	noAcceptable    = uint8(255)
-	UserPassAuth    = uint8(2)
-	userAuthVersion = uint8(1)
-	authSuccess     = uint8(0)
-	authFailure     = uint8(1)
-)
-
-var (
-	UserAuthFailed  = fmt.Errorf("User authentication failed")
-	NoSupportedAuth = fmt.Errorf("No supported authentication mechanism")
-)
-
-// A Request encapsulates authentication state provided
-// during negotiation
-type AuthContext struct {
-	// Provided auth method
-	Method uint8
-	// Payload provided during negotiation.
-	// Keys depend on the used auth method.
-	// For UserPassauth contains Username
-	Payload map[string]string
-}
-
-type Authenticator interface {
-	Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error)
-	GetCode() uint8
-}
-
-// NoAuthAuthenticator is used to handle the "No Authentication" mode
-type NoAuthAuthenticator struct{}
-
-func (a NoAuthAuthenticator) GetCode() uint8 {
-	return NoAuth
-}
-
-func (a NoAuthAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) {
-	_, err := writer.Write([]byte{socks5Version, NoAuth})
-	return &AuthContext{NoAuth, nil}, err
-}
-
-// UserPassAuthenticator is used to handle username/password based
-// authentication
-type UserPassAuthenticator struct {
-	Credentials CredentialStore
-}
-
-func (a UserPassAuthenticator) GetCode() uint8 {
-	return UserPassAuth
-}
-
-func (a UserPassAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) {
-	// Tell the client to use user/pass auth
-	if _, err := writer.Write([]byte{socks5Version, UserPassAuth}); err != nil {
-		return nil, err
-	}
-
-	// Get the version and username length
-	header := []byte{0, 0}
-	if _, err := io.ReadAtLeast(reader, header, 2); err != nil {
-		return nil, err
-	}
-
-	// Ensure we are compatible
-	if header[0] != userAuthVersion {
-		return nil, fmt.Errorf("Unsupported auth version: %v", header[0])
-	}
-
-	// Get the user name
-	userLen := int(header[1])
-	user := make([]byte, userLen)
-	if _, err := io.ReadAtLeast(reader, user, userLen); err != nil {
-		return nil, err
-	}
-
-	// Get the password length
-	if _, err := reader.Read(header[:1]); err != nil {
-		return nil, err
-	}
-
-	// Get the password
-	passLen := int(header[0])
-	pass := make([]byte, passLen)
-	if _, err := io.ReadAtLeast(reader, pass, passLen); err != nil {
-		return nil, err
-	}
-
-	// Verify the password
-	if a.Credentials.Valid(string(user), string(pass)) {
-		if _, err := writer.Write([]byte{userAuthVersion, authSuccess}); err != nil {
-			return nil, err
-		}
-	} else {
-		if _, err := writer.Write([]byte{userAuthVersion, authFailure}); err != nil {
-			return nil, err
-		}
-		return nil, UserAuthFailed
-	}
-
-	// Done
-	return &AuthContext{UserPassAuth, map[string]string{"Username": string(user)}}, nil
-}
-
-// authenticate is used to handle connection authentication
-func (s *Server) authenticate(conn io.Writer, bufConn io.Reader) (*AuthContext, error) {
-	// Get the methods
-	methods, err := readMethods(bufConn)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to get auth methods: %v", err)
-	}
-
-	// Select a usable method
-	for _, method := range methods {
-		cator, found := s.authMethods[method]
-		if found {
-			return cator.Authenticate(bufConn, conn)
-		}
-	}
-
-	// No usable method found
-	return nil, noAcceptableAuth(conn)
-}
-
-// noAcceptableAuth is used to handle when we have no eligible
-// authentication mechanism
-func noAcceptableAuth(conn io.Writer) error {
-	conn.Write([]byte{socks5Version, noAcceptable})
-	return NoSupportedAuth
-}
-
-// readMethods is used to read the number of methods
-// and proceeding auth methods
-func readMethods(r io.Reader) ([]byte, error) {
-	header := []byte{0}
-	if _, err := r.Read(header); err != nil {
-		return nil, err
-	}
-
-	numMethods := int(header[0])
-	methods := make([]byte, numMethods)
-	_, err := io.ReadAtLeast(r, methods, numMethods)
-	return methods, err
-}

+ 0 - 17
vendor/github.com/armon/go-socks5/credentials.go

@@ -1,17 +0,0 @@
-package socks5
-
-// CredentialStore is used to support user/pass authentication
-type CredentialStore interface {
-	Valid(user, password string) bool
-}
-
-// StaticCredentials enables using a map directly as a credential store
-type StaticCredentials map[string]string
-
-func (s StaticCredentials) Valid(user, password string) bool {
-	pass, ok := s[user]
-	if !ok {
-		return false
-	}
-	return password == pass
-}

+ 0 - 364
vendor/github.com/armon/go-socks5/request.go

@@ -1,364 +0,0 @@
-package socks5
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"strconv"
-	"strings"
-
-	"golang.org/x/net/context"
-)
-
-const (
-	ConnectCommand   = uint8(1)
-	BindCommand      = uint8(2)
-	AssociateCommand = uint8(3)
-	ipv4Address      = uint8(1)
-	fqdnAddress      = uint8(3)
-	ipv6Address      = uint8(4)
-)
-
-const (
-	successReply uint8 = iota
-	serverFailure
-	ruleFailure
-	networkUnreachable
-	hostUnreachable
-	connectionRefused
-	ttlExpired
-	commandNotSupported
-	addrTypeNotSupported
-)
-
-var (
-	unrecognizedAddrType = fmt.Errorf("Unrecognized address type")
-)
-
-// AddressRewriter is used to rewrite a destination transparently
-type AddressRewriter interface {
-	Rewrite(ctx context.Context, request *Request) (context.Context, *AddrSpec)
-}
-
-// AddrSpec is used to return the target AddrSpec
-// which may be specified as IPv4, IPv6, or a FQDN
-type AddrSpec struct {
-	FQDN string
-	IP   net.IP
-	Port int
-}
-
-func (a *AddrSpec) String() string {
-	if a.FQDN != "" {
-		return fmt.Sprintf("%s (%s):%d", a.FQDN, a.IP, a.Port)
-	}
-	return fmt.Sprintf("%s:%d", a.IP, a.Port)
-}
-
-// Address returns a string suitable to dial; prefer returning IP-based
-// address, fallback to FQDN
-func (a AddrSpec) Address() string {
-	if 0 != len(a.IP) {
-		return net.JoinHostPort(a.IP.String(), strconv.Itoa(a.Port))
-	}
-	return net.JoinHostPort(a.FQDN, strconv.Itoa(a.Port))
-}
-
-// A Request represents request received by a server
-type Request struct {
-	// Protocol version
-	Version uint8
-	// Requested command
-	Command uint8
-	// AuthContext provided during negotiation
-	AuthContext *AuthContext
-	// AddrSpec of the the network that sent the request
-	RemoteAddr *AddrSpec
-	// AddrSpec of the desired destination
-	DestAddr *AddrSpec
-	// AddrSpec of the actual destination (might be affected by rewrite)
-	realDestAddr *AddrSpec
-	bufConn      io.Reader
-}
-
-type conn interface {
-	Write([]byte) (int, error)
-	RemoteAddr() net.Addr
-}
-
-// NewRequest creates a new Request from the tcp connection
-func NewRequest(bufConn io.Reader) (*Request, error) {
-	// Read the version byte
-	header := []byte{0, 0, 0}
-	if _, err := io.ReadAtLeast(bufConn, header, 3); err != nil {
-		return nil, fmt.Errorf("Failed to get command version: %v", err)
-	}
-
-	// Ensure we are compatible
-	if header[0] != socks5Version {
-		return nil, fmt.Errorf("Unsupported command version: %v", header[0])
-	}
-
-	// Read in the destination address
-	dest, err := readAddrSpec(bufConn)
-	if err != nil {
-		return nil, err
-	}
-
-	request := &Request{
-		Version:  socks5Version,
-		Command:  header[1],
-		DestAddr: dest,
-		bufConn:  bufConn,
-	}
-
-	return request, nil
-}
-
-// handleRequest is used for request processing after authentication
-func (s *Server) handleRequest(req *Request, conn conn) error {
-	ctx := context.Background()
-
-	// Resolve the address if we have a FQDN
-	dest := req.DestAddr
-	if dest.FQDN != "" {
-		ctx_, addr, err := s.config.Resolver.Resolve(ctx, dest.FQDN)
-		if err != nil {
-			if err := sendReply(conn, hostUnreachable, nil); err != nil {
-				return fmt.Errorf("Failed to send reply: %v", err)
-			}
-			return fmt.Errorf("Failed to resolve destination '%v': %v", dest.FQDN, err)
-		}
-		ctx = ctx_
-		dest.IP = addr
-	}
-
-	// Apply any address rewrites
-	req.realDestAddr = req.DestAddr
-	if s.config.Rewriter != nil {
-		ctx, req.realDestAddr = s.config.Rewriter.Rewrite(ctx, req)
-	}
-
-	// Switch on the command
-	switch req.Command {
-	case ConnectCommand:
-		return s.handleConnect(ctx, conn, req)
-	case BindCommand:
-		return s.handleBind(ctx, conn, req)
-	case AssociateCommand:
-		return s.handleAssociate(ctx, conn, req)
-	default:
-		if err := sendReply(conn, commandNotSupported, nil); err != nil {
-			return fmt.Errorf("Failed to send reply: %v", err)
-		}
-		return fmt.Errorf("Unsupported command: %v", req.Command)
-	}
-}
-
-// handleConnect is used to handle a connect command
-func (s *Server) handleConnect(ctx context.Context, conn conn, req *Request) error {
-	// Check if this is allowed
-	if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok {
-		if err := sendReply(conn, ruleFailure, nil); err != nil {
-			return fmt.Errorf("Failed to send reply: %v", err)
-		}
-		return fmt.Errorf("Connect to %v blocked by rules", req.DestAddr)
-	} else {
-		ctx = ctx_
-	}
-
-	// Attempt to connect
-	dial := s.config.Dial
-	if dial == nil {
-		dial = func(ctx context.Context, net_, addr string) (net.Conn, error) {
-			return net.Dial(net_, addr)
-		}
-	}
-	target, err := dial(ctx, "tcp", req.realDestAddr.Address())
-	if err != nil {
-		msg := err.Error()
-		resp := hostUnreachable
-		if strings.Contains(msg, "refused") {
-			resp = connectionRefused
-		} else if strings.Contains(msg, "network is unreachable") {
-			resp = networkUnreachable
-		}
-		if err := sendReply(conn, resp, nil); err != nil {
-			return fmt.Errorf("Failed to send reply: %v", err)
-		}
-		return fmt.Errorf("Connect to %v failed: %v", req.DestAddr, err)
-	}
-	defer target.Close()
-
-	// Send success
-	local := target.LocalAddr().(*net.TCPAddr)
-	bind := AddrSpec{IP: local.IP, Port: local.Port}
-	if err := sendReply(conn, successReply, &bind); err != nil {
-		return fmt.Errorf("Failed to send reply: %v", err)
-	}
-
-	// Start proxying
-	errCh := make(chan error, 2)
-	go proxy(target, req.bufConn, errCh)
-	go proxy(conn, target, errCh)
-
-	// Wait
-	for i := 0; i < 2; i++ {
-		e := <-errCh
-		if e != nil {
-			// return from this function closes target (and conn).
-			return e
-		}
-	}
-	return nil
-}
-
-// handleBind is used to handle a connect command
-func (s *Server) handleBind(ctx context.Context, conn conn, req *Request) error {
-	// Check if this is allowed
-	if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok {
-		if err := sendReply(conn, ruleFailure, nil); err != nil {
-			return fmt.Errorf("Failed to send reply: %v", err)
-		}
-		return fmt.Errorf("Bind to %v blocked by rules", req.DestAddr)
-	} else {
-		ctx = ctx_
-	}
-
-	// TODO: Support bind
-	if err := sendReply(conn, commandNotSupported, nil); err != nil {
-		return fmt.Errorf("Failed to send reply: %v", err)
-	}
-	return nil
-}
-
-// handleAssociate is used to handle a connect command
-func (s *Server) handleAssociate(ctx context.Context, conn conn, req *Request) error {
-	// Check if this is allowed
-	if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok {
-		if err := sendReply(conn, ruleFailure, nil); err != nil {
-			return fmt.Errorf("Failed to send reply: %v", err)
-		}
-		return fmt.Errorf("Associate to %v blocked by rules", req.DestAddr)
-	} else {
-		ctx = ctx_
-	}
-
-	// TODO: Support associate
-	if err := sendReply(conn, commandNotSupported, nil); err != nil {
-		return fmt.Errorf("Failed to send reply: %v", err)
-	}
-	return nil
-}
-
-// readAddrSpec is used to read AddrSpec.
-// Expects an address type byte, follwed by the address and port
-func readAddrSpec(r io.Reader) (*AddrSpec, error) {
-	d := &AddrSpec{}
-
-	// Get the address type
-	addrType := []byte{0}
-	if _, err := r.Read(addrType); err != nil {
-		return nil, err
-	}
-
-	// Handle on a per type basis
-	switch addrType[0] {
-	case ipv4Address:
-		addr := make([]byte, 4)
-		if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil {
-			return nil, err
-		}
-		d.IP = net.IP(addr)
-
-	case ipv6Address:
-		addr := make([]byte, 16)
-		if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil {
-			return nil, err
-		}
-		d.IP = net.IP(addr)
-
-	case fqdnAddress:
-		if _, err := r.Read(addrType); err != nil {
-			return nil, err
-		}
-		addrLen := int(addrType[0])
-		fqdn := make([]byte, addrLen)
-		if _, err := io.ReadAtLeast(r, fqdn, addrLen); err != nil {
-			return nil, err
-		}
-		d.FQDN = string(fqdn)
-
-	default:
-		return nil, unrecognizedAddrType
-	}
-
-	// Read the port
-	port := []byte{0, 0}
-	if _, err := io.ReadAtLeast(r, port, 2); err != nil {
-		return nil, err
-	}
-	d.Port = (int(port[0]) << 8) | int(port[1])
-
-	return d, nil
-}
-
-// sendReply is used to send a reply message
-func sendReply(w io.Writer, resp uint8, addr *AddrSpec) error {
-	// Format the address
-	var addrType uint8
-	var addrBody []byte
-	var addrPort uint16
-	switch {
-	case addr == nil:
-		addrType = ipv4Address
-		addrBody = []byte{0, 0, 0, 0}
-		addrPort = 0
-
-	case addr.FQDN != "":
-		addrType = fqdnAddress
-		addrBody = append([]byte{byte(len(addr.FQDN))}, addr.FQDN...)
-		addrPort = uint16(addr.Port)
-
-	case addr.IP.To4() != nil:
-		addrType = ipv4Address
-		addrBody = []byte(addr.IP.To4())
-		addrPort = uint16(addr.Port)
-
-	case addr.IP.To16() != nil:
-		addrType = ipv6Address
-		addrBody = []byte(addr.IP.To16())
-		addrPort = uint16(addr.Port)
-
-	default:
-		return fmt.Errorf("Failed to format address: %v", addr)
-	}
-
-	// Format the message
-	msg := make([]byte, 6+len(addrBody))
-	msg[0] = socks5Version
-	msg[1] = resp
-	msg[2] = 0 // Reserved
-	msg[3] = addrType
-	copy(msg[4:], addrBody)
-	msg[4+len(addrBody)] = byte(addrPort >> 8)
-	msg[4+len(addrBody)+1] = byte(addrPort & 0xff)
-
-	// Send the message
-	_, err := w.Write(msg)
-	return err
-}
-
-type closeWriter interface {
-	CloseWrite() error
-}
-
-// proxy is used to suffle data from src to destination, and sends errors
-// down a dedicated channel
-func proxy(dst io.Writer, src io.Reader, errCh chan error) {
-	_, err := io.Copy(dst, src)
-	if tcpConn, ok := dst.(closeWriter); ok {
-		tcpConn.CloseWrite()
-	}
-	errCh <- err
-}

+ 0 - 23
vendor/github.com/armon/go-socks5/resolver.go

@@ -1,23 +0,0 @@
-package socks5
-
-import (
-	"net"
-
-	"golang.org/x/net/context"
-)
-
-// NameResolver is used to implement custom name resolution
-type NameResolver interface {
-	Resolve(ctx context.Context, name string) (context.Context, net.IP, error)
-}
-
-// DNSResolver uses the system DNS to resolve host names
-type DNSResolver struct{}
-
-func (d DNSResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) {
-	addr, err := net.ResolveIPAddr("ip", name)
-	if err != nil {
-		return ctx, nil, err
-	}
-	return ctx, addr.IP, err
-}

+ 0 - 41
vendor/github.com/armon/go-socks5/ruleset.go

@@ -1,41 +0,0 @@
-package socks5
-
-import (
-	"golang.org/x/net/context"
-)
-
-// RuleSet is used to provide custom rules to allow or prohibit actions
-type RuleSet interface {
-	Allow(ctx context.Context, req *Request) (context.Context, bool)
-}
-
-// PermitAll returns a RuleSet which allows all types of connections
-func PermitAll() RuleSet {
-	return &PermitCommand{true, true, true}
-}
-
-// PermitNone returns a RuleSet which disallows all types of connections
-func PermitNone() RuleSet {
-	return &PermitCommand{false, false, false}
-}
-
-// PermitCommand is an implementation of the RuleSet which
-// enables filtering supported commands
-type PermitCommand struct {
-	EnableConnect   bool
-	EnableBind      bool
-	EnableAssociate bool
-}
-
-func (p *PermitCommand) Allow(ctx context.Context, req *Request) (context.Context, bool) {
-	switch req.Command {
-	case ConnectCommand:
-		return ctx, p.EnableConnect
-	case BindCommand:
-		return ctx, p.EnableBind
-	case AssociateCommand:
-		return ctx, p.EnableAssociate
-	}
-
-	return ctx, false
-}

+ 0 - 169
vendor/github.com/armon/go-socks5/socks5.go

@@ -1,169 +0,0 @@
-package socks5
-
-import (
-	"bufio"
-	"fmt"
-	"log"
-	"net"
-	"os"
-
-	"golang.org/x/net/context"
-)
-
-const (
-	socks5Version = uint8(5)
-)
-
-// Config is used to setup and configure a Server
-type Config struct {
-	// AuthMethods can be provided to implement custom authentication
-	// By default, "auth-less" mode is enabled.
-	// For password-based auth use UserPassAuthenticator.
-	AuthMethods []Authenticator
-
-	// If provided, username/password authentication is enabled,
-	// by appending a UserPassAuthenticator to AuthMethods. If not provided,
-	// and AUthMethods is nil, then "auth-less" mode is enabled.
-	Credentials CredentialStore
-
-	// Resolver can be provided to do custom name resolution.
-	// Defaults to DNSResolver if not provided.
-	Resolver NameResolver
-
-	// Rules is provided to enable custom logic around permitting
-	// various commands. If not provided, PermitAll is used.
-	Rules RuleSet
-
-	// Rewriter can be used to transparently rewrite addresses.
-	// This is invoked before the RuleSet is invoked.
-	// Defaults to NoRewrite.
-	Rewriter AddressRewriter
-
-	// BindIP is used for bind or udp associate
-	BindIP net.IP
-
-	// Logger can be used to provide a custom log target.
-	// Defaults to stdout.
-	Logger *log.Logger
-
-	// Optional function for dialing out
-	Dial func(ctx context.Context, network, addr string) (net.Conn, error)
-}
-
-// Server is reponsible for accepting connections and handling
-// the details of the SOCKS5 protocol
-type Server struct {
-	config      *Config
-	authMethods map[uint8]Authenticator
-}
-
-// New creates a new Server and potentially returns an error
-func New(conf *Config) (*Server, error) {
-	// Ensure we have at least one authentication method enabled
-	if len(conf.AuthMethods) == 0 {
-		if conf.Credentials != nil {
-			conf.AuthMethods = []Authenticator{&UserPassAuthenticator{conf.Credentials}}
-		} else {
-			conf.AuthMethods = []Authenticator{&NoAuthAuthenticator{}}
-		}
-	}
-
-	// Ensure we have a DNS resolver
-	if conf.Resolver == nil {
-		conf.Resolver = DNSResolver{}
-	}
-
-	// Ensure we have a rule set
-	if conf.Rules == nil {
-		conf.Rules = PermitAll()
-	}
-
-	// Ensure we have a log target
-	if conf.Logger == nil {
-		conf.Logger = log.New(os.Stdout, "", log.LstdFlags)
-	}
-
-	server := &Server{
-		config: conf,
-	}
-
-	server.authMethods = make(map[uint8]Authenticator)
-
-	for _, a := range conf.AuthMethods {
-		server.authMethods[a.GetCode()] = a
-	}
-
-	return server, nil
-}
-
-// ListenAndServe is used to create a listener and serve on it
-func (s *Server) ListenAndServe(network, addr string) error {
-	l, err := net.Listen(network, addr)
-	if err != nil {
-		return err
-	}
-	return s.Serve(l)
-}
-
-// Serve is used to serve connections from a listener
-func (s *Server) Serve(l net.Listener) error {
-	for {
-		conn, err := l.Accept()
-		if err != nil {
-			return err
-		}
-		go s.ServeConn(conn)
-	}
-	return nil
-}
-
-// ServeConn is used to serve a single connection.
-func (s *Server) ServeConn(conn net.Conn) error {
-	defer conn.Close()
-	bufConn := bufio.NewReader(conn)
-
-	// Read the version byte
-	version := []byte{0}
-	if _, err := bufConn.Read(version); err != nil {
-		s.config.Logger.Printf("[ERR] socks: Failed to get version byte: %v", err)
-		return err
-	}
-
-	// Ensure we are compatible
-	if version[0] != socks5Version {
-		err := fmt.Errorf("Unsupported SOCKS version: %v", version)
-		s.config.Logger.Printf("[ERR] socks: %v", err)
-		return err
-	}
-
-	// Authenticate the connection
-	authContext, err := s.authenticate(conn, bufConn)
-	if err != nil {
-		err = fmt.Errorf("Failed to authenticate: %v", err)
-		s.config.Logger.Printf("[ERR] socks: %v", err)
-		return err
-	}
-
-	request, err := NewRequest(bufConn)
-	if err != nil {
-		if err == unrecognizedAddrType {
-			if err := sendReply(conn, addrTypeNotSupported, nil); err != nil {
-				return fmt.Errorf("Failed to send reply: %v", err)
-			}
-		}
-		return fmt.Errorf("Failed to read destination address: %v", err)
-	}
-	request.AuthContext = authContext
-	if client, ok := conn.RemoteAddr().(*net.TCPAddr); ok {
-		request.RemoteAddr = &AddrSpec{IP: client.IP, Port: client.Port}
-	}
-
-	// Process the client request
-	if err := s.handleRequest(request, conn); err != nil {
-		err = fmt.Errorf("Failed to handle request: %v", err)
-		s.config.Logger.Printf("[ERR] socks: %v", err)
-		return err
-	}
-
-	return nil
-}

+ 2 - 0
vendor/github.com/coreos/go-oidc/.gitignore

@@ -0,0 +1,2 @@
+/bin
+/gopath

+ 16 - 0
vendor/github.com/coreos/go-oidc/.travis.yml

@@ -0,0 +1,16 @@
+language: go
+
+go:
+  - "1.12"
+  - "1.13"
+
+install:
+ - go get -v -t github.com/coreos/go-oidc/...
+ - go get golang.org/x/tools/cmd/cover
+ - go get golang.org/x/lint/golint
+
+script:
+ - ./test
+
+notifications:
+  email: false

+ 71 - 0
vendor/github.com/coreos/go-oidc/CONTRIBUTING.md

@@ -0,0 +1,71 @@
+# How to Contribute
+
+CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
+GitHub pull requests.  This document outlines some of the conventions on
+development workflow, commit message formatting, contact points and other
+resources to make it easier to get your contribution accepted.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the Developer Certificate of
+Origin (DCO). This document was created by the Linux Kernel community and is a
+simple statement that you, as a contributor, have the legal right to make the
+contribution. See the [DCO](DCO) file for details.
+
+# Email and Chat
+
+The project currently uses the general CoreOS email list and IRC channel:
+- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
+- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
+
+Please avoid emailing maintainers found in the MAINTAINERS file directly. They
+are very busy and read the mailing lists.
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.md) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various
+git tools.

+ 36 - 35
vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt → vendor/github.com/coreos/go-oidc/DCO

@@ -1,35 +1,36 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2015- Klaus Post & Contributors.
-Email: klauspost@gmail.com
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.

+ 2 - 1
vendor/github.com/pires/go-proxyproto/LICENSE → vendor/github.com/coreos/go-oidc/LICENSE

@@ -1,4 +1,4 @@
-                                 Apache License
+Apache License
                            Version 2.0, January 2004
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
                         http://www.apache.org/licenses/
 
 
@@ -199,3 +199,4 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    See the License for the specific language governing permissions and
    limitations under the License.
    limitations under the License.
+

+ 3 - 0
vendor/github.com/coreos/go-oidc/MAINTAINERS

@@ -0,0 +1,3 @@
+Eric Chiang <ericchiang@google.com> (@ericchiang)
+Mike Danese <mikedanese@google.com> (@mikedanese)
+Rithu Leena John <rjohn@redhat.com> (@rithujohn191)

+ 5 - 0
vendor/github.com/coreos/go-oidc/NOTICE

@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).

+ 72 - 0
vendor/github.com/coreos/go-oidc/README.md

@@ -0,0 +1,72 @@
+# go-oidc
+
+[![GoDoc](https://godoc.org/github.com/coreos/go-oidc?status.svg)](https://godoc.org/github.com/coreos/go-oidc)
+[![Build Status](https://travis-ci.org/coreos/go-oidc.png?branch=master)](https://travis-ci.org/coreos/go-oidc)
+
+## OpenID Connect support for Go
+
+This package enables OpenID Connect support for the [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package.
+
+```go
+provider, err := oidc.NewProvider(ctx, "https://accounts.google.com")
+if err != nil {
+    // handle error
+}
+
+// Configure an OpenID Connect aware OAuth2 client.
+oauth2Config := oauth2.Config{
+    ClientID:     clientID,
+    ClientSecret: clientSecret,
+    RedirectURL:  redirectURL,
+
+    // Discovery returns the OAuth2 endpoints.
+    Endpoint: provider.Endpoint(),
+
+    // "openid" is a required scope for OpenID Connect flows.
+    Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
+}
+```
+
+OAuth2 redirects are unchanged.
+
+```go
+func handleRedirect(w http.ResponseWriter, r *http.Request) {
+    http.Redirect(w, r, oauth2Config.AuthCodeURL(state), http.StatusFound)
+}
+```
+
+On responses, the provider can be used to verify ID Tokens.
+
+```go
+var verifier = provider.Verifier(&oidc.Config{ClientID: clientID})
+
+func handleOAuth2Callback(w http.ResponseWriter, r *http.Request) {
+    // Verify state and errors.
+
+    oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
+    if err != nil {
+        // handle error
+    }
+
+    // Extract the ID Token from OAuth2 token.
+    rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+    if !ok {
+        // handle missing token
+    }
+
+    // Parse and verify ID Token payload.
+    idToken, err := verifier.Verify(ctx, rawIDToken)
+    if err != nil {
+        // handle error
+    }
+
+    // Extract custom claims
+    var claims struct {
+        Email    string `json:"email"`
+        Verified bool   `json:"email_verified"`
+    }
+    if err := idToken.Claims(&claims); err != nil {
+        // handle error
+    }
+}
+```

+ 61 - 0
vendor/github.com/coreos/go-oidc/code-of-conduct.md

@@ -0,0 +1,61 @@
+## CoreOS Community Code of Conduct
+
+### Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of
+fostering an open and welcoming community, we pledge to respect all people who
+contribute through reporting issues, posting feature requests, updating
+documentation, submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free
+experience for everyone, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as physical or electronic addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently applying these
+principles to every aspect of managing this project. Project maintainers who do
+not follow or enforce the Code of Conduct may be permanently removed from the
+project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting a project maintainer, Brandon Philips
+<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
+
+This Code of Conduct is adapted from the Contributor Covenant
+(http://contributor-covenant.org), version 1.2.0, available at
+http://contributor-covenant.org/version/1/2/0/
+
+### CoreOS Events Code of Conduct
+
+CoreOS events are working conferences intended for professional networking and
+collaboration in the CoreOS community. Attendees are expected to behave
+according to professional standards and in accordance with their employer’s
+policies on appropriate workplace behavior.
+
+While at CoreOS events or related social networking opportunities, attendees
+should not engage in discriminatory or offensive speech or actions including
+but not limited to gender, sexuality, race, age, disability, or religion.
+Speakers should be especially aware of these concerns.
+
+CoreOS does not condone any statements by speakers contrary to these standards.
+CoreOS reserves the right to deny entrance and/or eject from an event (without
+refund) any individual found to be engaging in discriminatory or offensive
+speech or actions.
+
+Please bring any concerns to the immediate attention of designated on-site
+staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.

+ 20 - 0
vendor/github.com/coreos/go-oidc/jose.go

@@ -0,0 +1,20 @@
+// +build !golint
+
+// Don't lint this file. We don't want to have to add a comment to each constant.
+
+package oidc
+
+const (
+	// JOSE asymmetric signing algorithm values as defined by RFC 7518
+	//
+	// see: https://tools.ietf.org/html/rfc7518#section-3.1
+	RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
+	RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
+	RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
+	ES256 = "ES256" // ECDSA using P-256 and SHA-256
+	ES384 = "ES384" // ECDSA using P-384 and SHA-384
+	ES512 = "ES512" // ECDSA using P-521 and SHA-512
+	PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
+	PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
+	PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
+)

+ 228 - 0
vendor/github.com/coreos/go-oidc/jwks.go

@@ -0,0 +1,228 @@
+package oidc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/pquerna/cachecontrol"
+	jose "gopkg.in/square/go-jose.v2"
+)
+
+// keysExpiryDelta is the allowed clock skew between a client and the OpenID Connect
+// server.
+//
+// When keys expire, they are valid for this amount of time after.
+//
+// If the keys have not expired, and an ID Token claims it was signed by a key not in
+// the cache, if and only if the keys expire in this amount of time, the keys will be
+// updated.
+const keysExpiryDelta = 30 * time.Second
+
+// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
+// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
+// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
+// exposed for providers that don't support discovery or to prevent round trips to the
+// discovery URL.
+//
+// The returned KeySet is a long lived verifier that caches keys based on cache-control
+// headers. Reuse a common remote key set instead of creating new ones as needed.
+//
+// The behavior of the returned KeySet is undefined once the context is canceled.
+func NewRemoteKeySet(ctx context.Context, jwksURL string) KeySet {
+	return newRemoteKeySet(ctx, jwksURL, time.Now)
+}
+
+func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *remoteKeySet {
+	if now == nil {
+		now = time.Now
+	}
+	return &remoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
+}
+
+type remoteKeySet struct {
+	jwksURL string
+	ctx     context.Context
+	now     func() time.Time
+
+	// guard all other fields
+	mu sync.Mutex
+
+	// inflight suppresses parallel execution of updateKeys and allows
+	// multiple goroutines to wait for its result.
+	inflight *inflight
+
+	// A set of cached keys and their expiry.
+	cachedKeys []jose.JSONWebKey
+	expiry     time.Time
+}
+
+// inflight is used to wait on some in-flight request from multiple goroutines.
+type inflight struct {
+	doneCh chan struct{}
+
+	keys []jose.JSONWebKey
+	err  error
+}
+
+func newInflight() *inflight {
+	return &inflight{doneCh: make(chan struct{})}
+}
+
+// wait returns a channel that multiple goroutines can receive on. Once it returns
+// a value, the inflight request is done and result() can be inspected.
+func (i *inflight) wait() <-chan struct{} {
+	return i.doneCh
+}
+
+// done can only be called by a single goroutine. It records the result of the
+// inflight request and signals other goroutines that the result is safe to
+// inspect.
+func (i *inflight) done(keys []jose.JSONWebKey, err error) {
+	i.keys = keys
+	i.err = err
+	close(i.doneCh)
+}
+
+// result cannot be called until the wait() channel has returned a value.
+func (i *inflight) result() ([]jose.JSONWebKey, error) {
+	return i.keys, i.err
+}
+
+func (r *remoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
+	jws, err := jose.ParseSigned(jwt)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+	return r.verify(ctx, jws)
+}
+
+func (r *remoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
+	// We don't support JWTs signed with multiple signatures.
+	keyID := ""
+	for _, sig := range jws.Signatures {
+		keyID = sig.Header.KeyID
+		break
+	}
+
+	keys, expiry := r.keysFromCache()
+
+	// Don't check expiry yet. This optimizes for when the provider is unavailable.
+	for _, key := range keys {
+		if keyID == "" || key.KeyID == keyID {
+			if payload, err := jws.Verify(&key); err == nil {
+				return payload, nil
+			}
+		}
+	}
+
+	if !r.now().Add(keysExpiryDelta).After(expiry) {
+		// Keys haven't expired, don't refresh.
+		return nil, errors.New("failed to verify id token signature")
+	}
+
+	keys, err := r.keysFromRemote(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("fetching keys %v", err)
+	}
+
+	for _, key := range keys {
+		if keyID == "" || key.KeyID == keyID {
+			if payload, err := jws.Verify(&key); err == nil {
+				return payload, nil
+			}
+		}
+	}
+	return nil, errors.New("failed to verify id token signature")
+}
+
+func (r *remoteKeySet) keysFromCache() (keys []jose.JSONWebKey, expiry time.Time) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return r.cachedKeys, r.expiry
+}
+
+// keysFromRemote syncs the key set from the remote set, records the values in the
+// cache, and returns the key set.
+func (r *remoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
+	// Need to lock to inspect the inflight request field.
+	r.mu.Lock()
+	// If there's not a current inflight request, create one.
+	if r.inflight == nil {
+		r.inflight = newInflight()
+
+		// This goroutine has exclusive ownership over the current inflight
+		// request. It releases the resource by nil'ing the inflight field
+		// once the goroutine is done.
+		go func() {
+			// Sync keys and finish inflight when that's done.
+			keys, expiry, err := r.updateKeys()
+
+			r.inflight.done(keys, err)
+
+			// Lock to update the keys and indicate that there is no longer an
+			// inflight request.
+			r.mu.Lock()
+			defer r.mu.Unlock()
+
+			if err == nil {
+				r.cachedKeys = keys
+				r.expiry = expiry
+			}
+
+			// Free inflight so a different request can run.
+			r.inflight = nil
+		}()
+	}
+	inflight := r.inflight
+	r.mu.Unlock()
+
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-inflight.wait():
+		return inflight.result()
+	}
+}
+
+func (r *remoteKeySet) updateKeys() ([]jose.JSONWebKey, time.Time, error) {
+	req, err := http.NewRequest("GET", r.jwksURL, nil)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("oidc: can't create request: %v", err)
+	}
+
+	resp, err := doRequest(r.ctx, req)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("oidc: get keys failed %v", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("unable to read response body: %v", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, time.Time{}, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
+	}
+
+	var keySet jose.JSONWebKeySet
+	err = unmarshalResp(resp, body, &keySet)
+	if err != nil {
+		return nil, time.Time{}, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
+	}
+
+	// If the server doesn't provide cache control headers, assume the
+	// keys expire immediately.
+	expiry := r.now()
+
+	_, e, err := cachecontrol.CachableResponse(req, resp, cachecontrol.Options{})
+	if err == nil && e.After(expiry) {
+		expiry = e
+	}
+	return keySet.Keys, expiry, nil
+}

+ 409 - 0
vendor/github.com/coreos/go-oidc/oidc.go

@@ -0,0 +1,409 @@
+// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
+package oidc
+
+import (
+	"context"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"hash"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2"
+	jose "gopkg.in/square/go-jose.v2"
+)
+
+const (
+	// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
+	ScopeOpenID = "openid"
+
+	// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
+	// OAuth2 refresh tokens.
+	//
+	// Support for this scope differs between OpenID Connect providers. For instance
+	// Google rejects it, favoring appending "access_type=offline" as part of the
+	// authorization request instead.
+	//
+	// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
+	ScopeOfflineAccess = "offline_access"
+)
+
+var (
+	errNoAtHash      = errors.New("id token did not have an access token hash")
+	errInvalidAtHash = errors.New("access token hash does not match value in ID token")
+)
+
+// ClientContext returns a new Context that carries the provided HTTP client.
+//
+// This method sets the same context key used by the golang.org/x/oauth2 package,
+// so the returned context works for that package too.
+//
+//    myClient := &http.Client{}
+//    ctx := oidc.ClientContext(parentContext, myClient)
+//
+//    // This will use the custom client
+//    provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
+//
+func ClientContext(ctx context.Context, client *http.Client) context.Context {
+	return context.WithValue(ctx, oauth2.HTTPClient, client)
+}
+
+func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
+	client := http.DefaultClient
+	if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
+		client = c
+	}
+	return client.Do(req.WithContext(ctx))
+}
+
+// Provider represents an OpenID Connect server's configuration.
+type Provider struct {
+	issuer      string
+	authURL     string
+	tokenURL    string
+	userInfoURL string
+	algorithms  []string
+
+	// Raw claims returned by the server.
+	rawClaims []byte
+
+	remoteKeySet KeySet
+}
+
+type cachedKeys struct {
+	keys   []jose.JSONWebKey
+	expiry time.Time
+}
+
+type providerJSON struct {
+	Issuer      string   `json:"issuer"`
+	AuthURL     string   `json:"authorization_endpoint"`
+	TokenURL    string   `json:"token_endpoint"`
+	JWKSURL     string   `json:"jwks_uri"`
+	UserInfoURL string   `json:"userinfo_endpoint"`
+	Algorithms  []string `json:"id_token_signing_alg_values_supported"`
+}
+
+// supportedAlgorithms is a list of algorithms explicitly supported by this
+// package. If a provider supports other algorithms, such as HS256 or none,
+// those values won't be passed to the IDTokenVerifier.
+var supportedAlgorithms = map[string]bool{
+	RS256: true,
+	RS384: true,
+	RS512: true,
+	ES256: true,
+	ES384: true,
+	ES512: true,
+	PS256: true,
+	PS384: true,
+	PS512: true,
+}
+
+// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
+//
+// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
+// or "https://login.salesforce.com".
+func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
+	wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
+	req, err := http.NewRequest("GET", wellKnown, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := doRequest(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read response body: %v", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s: %s", resp.Status, body)
+	}
+
+	var p providerJSON
+	err = unmarshalResp(resp, body, &p)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
+	}
+
+	if p.Issuer != issuer {
+		return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
+	}
+	var algs []string
+	for _, a := range p.Algorithms {
+		if supportedAlgorithms[a] {
+			algs = append(algs, a)
+		}
+	}
+	return &Provider{
+		issuer:       p.Issuer,
+		authURL:      p.AuthURL,
+		tokenURL:     p.TokenURL,
+		userInfoURL:  p.UserInfoURL,
+		algorithms:   algs,
+		rawClaims:    body,
+		remoteKeySet: NewRemoteKeySet(ctx, p.JWKSURL),
+	}, nil
+}
+
+// Claims unmarshals raw fields returned by the server during discovery.
+//
+//    var claims struct {
+//        ScopesSupported []string `json:"scopes_supported"`
+//        ClaimsSupported []string `json:"claims_supported"`
+//    }
+//
+//    if err := provider.Claims(&claims); err != nil {
+//        // handle unmarshaling error
+//    }
+//
+// For a list of fields defined by the OpenID Connect spec see:
+// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+func (p *Provider) Claims(v interface{}) error {
+	if p.rawClaims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(p.rawClaims, v)
+}
+
+// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
+func (p *Provider) Endpoint() oauth2.Endpoint {
+	return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
+}
+
+// UserInfo represents the OpenID Connect userinfo claims.
+type UserInfo struct {
+	Subject       string `json:"sub"`
+	Profile       string `json:"profile"`
+	Email         string `json:"email"`
+	EmailVerified bool   `json:"email_verified"`
+
+	claims []byte
+}
+
+// Claims unmarshals the raw JSON object claims into the provided object.
+func (u *UserInfo) Claims(v interface{}) error {
+	if u.claims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(u.claims, v)
+}
+
+// UserInfo uses the token source to query the provider's user info endpoint.
+func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
+	if p.userInfoURL == "" {
+		return nil, errors.New("oidc: user info endpoint is not supported by this provider")
+	}
+
+	req, err := http.NewRequest("GET", p.userInfoURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: create GET request: %v", err)
+	}
+
+	token, err := tokenSource.Token()
+	if err != nil {
+		return nil, fmt.Errorf("oidc: get access token: %v", err)
+	}
+	token.SetAuthHeader(req)
+
+	resp, err := doRequest(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s: %s", resp.Status, body)
+	}
+
+	var userInfo UserInfo
+	if err := json.Unmarshal(body, &userInfo); err != nil {
+		return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
+	}
+	userInfo.claims = body
+	return &userInfo, nil
+}
+
+// IDToken is an OpenID Connect extension that provides a predictable representation
+// of an authorization event.
+//
+// The ID Token only holds fields OpenID Connect requires. To access additional
+// claims returned by the server, use the Claims method.
+type IDToken struct {
+	// The URL of the server which issued this token. OpenID Connect
+	// requires this value always be identical to the URL used for
+	// initial discovery.
+	//
+	// Note: Because of a known issue with Google Accounts' implementation
+	// this value may differ when using Google.
+	//
+	// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
+	Issuer string
+
+	// The client ID, or set of client IDs, that this token is issued for. For
+	// common uses, this is the client that initialized the auth flow.
+	//
+	// This package ensures the audience contains an expected value.
+	Audience []string
+
+	// A unique string which identifies the end user.
+	Subject string
+
+	// Expiry of the token. This package will not process tokens that have
+	// expired unless that validation is explicitly turned off.
+	Expiry time.Time
+	// When the token was issued by the provider.
+	IssuedAt time.Time
+
+	// Initial nonce provided during the authentication redirect.
+	//
+	// This package does NOT provide verification on the value of this field
+	// and it's the user's responsibility to ensure it contains a valid value.
+	Nonce string
+
+	// at_hash claim, if set in the ID token. Callers can verify an access token
+	// that corresponds to the ID token using the VerifyAccessToken method.
+	AccessTokenHash string
+
+	// signature algorithm used for ID token, needed to compute a verification hash of an
+	// access token
+	sigAlgorithm string
+
+	// Raw payload of the id_token.
+	claims []byte
+
+	// Map of distributed claim names to claim sources
+	distributedClaims map[string]claimSource
+}
+
+// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
+//
+//		idToken, err := idTokenVerifier.Verify(rawIDToken)
+//		if err != nil {
+//			// handle error
+//		}
+//		var claims struct {
+//			Email         string `json:"email"`
+//			EmailVerified bool   `json:"email_verified"`
+//		}
+//		if err := idToken.Claims(&claims); err != nil {
+//			// handle error
+//		}
+//
+func (i *IDToken) Claims(v interface{}) error {
+	if i.claims == nil {
+		return errors.New("oidc: claims not set")
+	}
+	return json.Unmarshal(i.claims, v)
+}
+
+// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
+// matches the hash in the ID token. It returns an error if the hashes don't match.
+// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
+// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
+func (i *IDToken) VerifyAccessToken(accessToken string) error {
+	if i.AccessTokenHash == "" {
+		return errNoAtHash
+	}
+	var h hash.Hash
+	switch i.sigAlgorithm {
+	case RS256, ES256, PS256:
+		h = sha256.New()
+	case RS384, ES384, PS384:
+		h = sha512.New384()
+	case RS512, ES512, PS512:
+		h = sha512.New()
+	default:
+		return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
+	}
+	h.Write([]byte(accessToken)) // hash documents that Write will never return an error
+	sum := h.Sum(nil)[:h.Size()/2]
+	actual := base64.RawURLEncoding.EncodeToString(sum)
+	if actual != i.AccessTokenHash {
+		return errInvalidAtHash
+	}
+	return nil
+}
+
+type idToken struct {
+	Issuer       string                 `json:"iss"`
+	Subject      string                 `json:"sub"`
+	Audience     audience               `json:"aud"`
+	Expiry       jsonTime               `json:"exp"`
+	IssuedAt     jsonTime               `json:"iat"`
+	NotBefore    *jsonTime              `json:"nbf"`
+	Nonce        string                 `json:"nonce"`
+	AtHash       string                 `json:"at_hash"`
+	ClaimNames   map[string]string      `json:"_claim_names"`
+	ClaimSources map[string]claimSource `json:"_claim_sources"`
+}
+
+type claimSource struct {
+	Endpoint    string `json:"endpoint"`
+	AccessToken string `json:"access_token"`
+}
+
+type audience []string
+
+func (a *audience) UnmarshalJSON(b []byte) error {
+	var s string
+	if json.Unmarshal(b, &s) == nil {
+		*a = audience{s}
+		return nil
+	}
+	var auds []string
+	if err := json.Unmarshal(b, &auds); err != nil {
+		return err
+	}
+	*a = audience(auds)
+	return nil
+}
+
+type jsonTime time.Time
+
+func (j *jsonTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	if err := json.Unmarshal(b, &n); err != nil {
+		return err
+	}
+	var unix int64
+
+	if t, err := n.Int64(); err == nil {
+		unix = t
+	} else {
+		f, err := n.Float64()
+		if err != nil {
+			return err
+		}
+		unix = int64(f)
+	}
+	*j = jsonTime(time.Unix(unix, 0))
+	return nil
+}
+
+func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
+	err := json.Unmarshal(body, &v)
+	if err == nil {
+		return nil
+	}
+	ct := r.Header.Get("Content-Type")
+	mediaType, _, parseErr := mime.ParseMediaType(ct)
+	if parseErr == nil && mediaType == "application/json" {
+		return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
+	}
+	return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
+}

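For reference, a minimal sketch of how the Provider API vendored above might be consumed: discovery via NewProvider (defined earlier in this file), extra discovery metadata via Claims, and a userinfo lookup via UserInfo. The issuer URL, access token, and claim field below are illustrative placeholders, not values used by frp.

    package main

    import (
        "context"
        "fmt"
        "log"

        oidc "github.com/coreos/go-oidc"
        "golang.org/x/oauth2"
    )

    func main() {
        ctx := context.Background()

        // Fetch the issuer's discovery document and remote key set.
        provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
        if err != nil {
            log.Fatal(err)
        }

        // Claims exposes discovery fields beyond the ones parsed into Provider.
        var meta struct {
            ScopesSupported []string `json:"scopes_supported"`
        }
        if err := provider.Claims(&meta); err != nil {
            log.Fatal(err)
        }

        // UserInfo queries the userinfo endpoint with an existing access token.
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder-access-token"})
        info, err := provider.UserInfo(ctx, ts)
        if err != nil {
            log.Fatal(err)
        }

        fmt.Println(provider.Endpoint().AuthURL, info.Subject, meta.ScopesSupported)
    }
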
+ 16 - 0
vendor/github.com/coreos/go-oidc/test

@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+# Filter out any files with a !golint build tag.
+LINTABLE=$( go list -tags=golint -f '
+  {{- range $i, $file := .GoFiles -}}
+    {{ $file }} {{ end }}
+  {{ range $i, $file := .TestGoFiles -}}
+    {{ $file }} {{ end }}' github.com/coreos/go-oidc )
+
+go test -v -i -race github.com/coreos/go-oidc/...
+go test -v -race github.com/coreos/go-oidc/...
+golint -set_exit_status $LINTABLE
+go vet github.com/coreos/go-oidc/...
+go build -v ./example/...

+ 336 - 0
vendor/github.com/coreos/go-oidc/verify.go

@@ -0,0 +1,336 @@
+package oidc
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"time"
+
+	"golang.org/x/oauth2"
+	jose "gopkg.in/square/go-jose.v2"
+)
+
+const (
+	issuerGoogleAccounts         = "https://accounts.google.com"
+	issuerGoogleAccountsNoScheme = "accounts.google.com"
+)
+
+// KeySet is a set of public JSON Web Keys that can be used to validate the signature
+// of JSON web tokens. This is expected to be backed by a remote key set through
+// provider metadata discovery or an in-memory set of keys delivered out-of-band.
+type KeySet interface {
+	// VerifySignature parses the JSON web token, verifies the signature, and returns
+	// the raw payload. Header and claim fields are validated by other parts of the
+	// package. For example, the KeySet does not need to check values such as signature
+	// algorithm, issuer, and audience since the IDTokenVerifier validates these values
+	// independently.
+	//
+	// If VerifySignature makes HTTP requests to verify the token, it's expected to
+	// use any HTTP client associated with the context through ClientContext.
+	VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
+}
+
+// IDTokenVerifier provides verification for ID Tokens.
+type IDTokenVerifier struct {
+	keySet KeySet
+	config *Config
+	issuer string
+}
+
+// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
+//
+// It's easier to use provider discovery to construct an IDTokenVerifier than creating
+// one directly. This method is intended to be used with providers that don't support
+// metadata discovery, or to avoid round trips when the key set URL is already known.
+//
+// This constructor can be used to create a verifier directly using the issuer URL and
+// JSON Web Key Set URL without using discovery:
+//
+//		keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
+//		verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
+//
+// Since KeySet is an interface, this constructor can also be used to supply custom
+// public key sources. For example, if a user wanted to supply public keys out-of-band
+// and hold them statically in-memory:
+//
+//		// Custom KeySet implementation.
+//		keySet := newStaticKeySet(publicKeys...)
+//
+//		// Verifier uses the custom KeySet implementation.
+//		verifier := oidc.NewVerifier("https://auth.example.com", keySet, config)
+//
+func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
+	return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
+}
+
+// Config is the configuration for an IDTokenVerifier.
+type Config struct {
+	// Expected audience of the token. For a majority of the cases this is expected to be
+	// the ID of the client that initialized the login flow. It may occasionally differ if
+	// the provider supports the authorizing party (azp) claim.
+	//
+	// If not provided, users must explicitly set SkipClientIDCheck.
+	ClientID string
+	// If specified, only this set of algorithms may be used to sign the JWT.
+	//
+	// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
+	// defaults to the set of algorithms the provider supports. Otherwise this value
+	// defaults to RS256.
+	SupportedSigningAlgs []string
+
+	// If true, no ClientID check is performed. Must be true if the ClientID field is empty.
+	SkipClientIDCheck bool
+	// If true, token expiry is not checked.
+	SkipExpiryCheck bool
+
+	// SkipIssuerCheck is intended for specialized cases where the caller wishes to
+	// defer issuer validation. When enabled, callers MUST independently verify the Token's
+	// Issuer is a known good value.
+	//
+	// Mismatched issuers often indicate client misconfiguration. If mismatches are
+	// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
+	// this option.
+	SkipIssuerCheck bool
+
+	// Time function to check Token expiry. Defaults to time.Now
+	Now func() time.Time
+}
+
+// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
+//
+// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
+// undefined once the Provider's context is canceled.
+func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
+	if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
+		// Make a copy so we don't modify the config values.
+		cp := &Config{}
+		*cp = *config
+		cp.SupportedSigningAlgs = p.algorithms
+		config = cp
+	}
+	return NewVerifier(p.issuer, p.remoteKeySet, config)
+}
+
+func parseJWT(p string) ([]byte, error) {
+	parts := strings.Split(p, ".")
+	if len(parts) < 2 {
+		return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
+	}
+	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
+	}
+	return payload, nil
+}
+
+func contains(sli []string, ele string) bool {
+	for _, s := range sli {
+		if s == ele {
+			return true
+		}
+	}
+	return false
+}
+
+// Returns the Claims from the distributed JWT token
+func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
+	req, err := http.NewRequest("GET", src.Endpoint, nil)
+	if err != nil {
+		return nil, fmt.Errorf("malformed request: %v", err)
+	}
+	if src.AccessToken != "" {
+		req.Header.Set("Authorization", "Bearer "+src.AccessToken)
+	}
+
+	resp, err := doRequest(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read response body: %v", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
+	}
+
+	token, err := verifier.Verify(ctx, string(body))
+	if err != nil {
+		return nil, fmt.Errorf("malformed response body: %v", err)
+	}
+
+	return token.claims, nil
+}
+
+func parseClaim(raw []byte, name string, v interface{}) error {
+	var parsed map[string]json.RawMessage
+	if err := json.Unmarshal(raw, &parsed); err != nil {
+		return err
+	}
+
+	val, ok := parsed[name]
+	if !ok {
+		return fmt.Errorf("claim doesn't exist: %s", name)
+	}
+
+	return json.Unmarshal([]byte(val), v)
+}
+
+// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
+// any additional checks depending on the Config, and returns the payload.
+//
+// Verify does NOT do nonce validation, which is the caller's responsibility.
+//
+// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
+//
+//    oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
+//    if err != nil {
+//        // handle error
+//    }
+//
+//    // Extract the ID Token from oauth2 token.
+//    rawIDToken, ok := oauth2Token.Extra("id_token").(string)
+//    if !ok {
+//        // handle error
+//    }
+//
+//    token, err := verifier.Verify(ctx, rawIDToken)
+//
+func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
+	jws, err := jose.ParseSigned(rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+
+	// Throw out tokens with invalid claims before trying to verify the token. This lets
+	// us do cheap checks before possibly re-syncing keys.
+	payload, err := parseJWT(rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
+	}
+	var token idToken
+	if err := json.Unmarshal(payload, &token); err != nil {
+		return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
+	}
+
+	distributedClaims := make(map[string]claimSource)
+
+	// Step through the token to map claim names to claim sources.
+	for cn, src := range token.ClaimNames {
+		if src == "" {
+			return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
+		}
+		s, ok := token.ClaimSources[src]
+		if !ok {
+			return nil, fmt.Errorf("oidc: source does not exist")
+		}
+		distributedClaims[cn] = s
+	}
+
+	t := &IDToken{
+		Issuer:            token.Issuer,
+		Subject:           token.Subject,
+		Audience:          []string(token.Audience),
+		Expiry:            time.Time(token.Expiry),
+		IssuedAt:          time.Time(token.IssuedAt),
+		Nonce:             token.Nonce,
+		AccessTokenHash:   token.AtHash,
+		claims:            payload,
+		distributedClaims: distributedClaims,
+	}
+
+	// Check issuer.
+	if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
+		// Google sometimes returns "accounts.google.com" as the issuer claim instead of
+		// the required "https://accounts.google.com". Detect this case and allow it only
+		// for Google.
+		//
+		// We will not add hooks to let other providers go off spec like this.
+		if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
+			return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
+		}
+	}
+
+	// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
+	//
+	// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
+	if !v.config.SkipClientIDCheck {
+		if v.config.ClientID != "" {
+			if !contains(t.Audience, v.config.ClientID) {
+				return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
+			}
+		} else {
+			return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
+		}
+	}
+
+	// If SkipExpiryCheck is false, make sure the token is not expired.
+	if !v.config.SkipExpiryCheck {
+		now := time.Now
+		if v.config.Now != nil {
+			now = v.config.Now
+		}
+		nowTime := now()
+
+		if t.Expiry.Before(nowTime) {
+			return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry)
+		}
+
+		// If nbf claim is provided in token, ensure that it is indeed in the past.
+		if token.NotBefore != nil {
+			nbfTime := time.Time(*token.NotBefore)
+			leeway := 1 * time.Minute
+
+			if nowTime.Add(leeway).Before(nbfTime) {
+				return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
+			}
+		}
+	}
+
+	switch len(jws.Signatures) {
+	case 0:
+		return nil, fmt.Errorf("oidc: id token not signed")
+	case 1:
+	default:
+		return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
+	}
+
+	sig := jws.Signatures[0]
+	supportedSigAlgs := v.config.SupportedSigningAlgs
+	if len(supportedSigAlgs) == 0 {
+		supportedSigAlgs = []string{RS256}
+	}
+
+	if !contains(supportedSigAlgs, sig.Header.Algorithm) {
+		return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
+	}
+
+	t.sigAlgorithm = sig.Header.Algorithm
+
+	gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to verify signature: %v", err)
+	}
+
+	// Ensure that the payload returned by the KeySet actually matches the payload parsed earlier.
+	if !bytes.Equal(gotPayload, payload) {
+		return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
+	}
+
+	return t, nil
+}
+
+// Nonce returns an auth code option which requires the ID Token created by the
+// OpenID Connect provider to contain the specified nonce.
+func Nonce(nonce string) oauth2.AuthCodeOption {
+	return oauth2.SetAuthURLParam("nonce", nonce)
+}

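Likewise, a minimal sketch of the verification flow this file provides: build a verifier from the provider, verify a raw ID token, optionally check the access token against the at_hash claim, then read custom claims. The issuer, client ID, and token values below are placeholders, not frp configuration.

    package main

    import (
        "context"
        "log"

        oidc "github.com/coreos/go-oidc"
    )

    func verifyIDToken(ctx context.Context, rawIDToken, accessToken string) error {
        provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
        if err != nil {
            return err
        }

        // The verifier inherits the provider's issuer, key set, and supported algorithms.
        verifier := provider.Verifier(&oidc.Config{ClientID: "example-client-id"})

        // Verify checks the signature, issuer, audience, and expiry of the raw token.
        idToken, err := verifier.Verify(ctx, rawIDToken)
        if err != nil {
            return err
        }

        // Optionally confirm the access token against the at_hash claim, if present.
        if idToken.AccessTokenHash != "" {
            if err := idToken.VerifyAccessToken(accessToken); err != nil {
                return err
            }
        }

        // Read any custom claims out of the verified token.
        var claims struct {
            Email string `json:"email"`
        }
        if err := idToken.Claims(&claims); err != nil {
            return err
        }

        log.Printf("verified subject %s (email %s)", idToken.Subject, claims.Email)
        return nil
    }
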
+ 0 - 15
vendor/github.com/davecgh/go-spew/LICENSE

@@ -1,15 +0,0 @@
-ISC License
-
-Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 0 - 152
vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -1,152 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line.  The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
-
-package spew
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-const (
-	// UnsafeDisabled is a build-time constant which specifies whether or
-	// not access to the unsafe package is available.
-	UnsafeDisabled = false
-
-	// ptrSize is the size of a pointer on the current arch.
-	ptrSize = unsafe.Sizeof((*byte)(nil))
-)
-
-var (
-	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
-	// internal reflect.Value fields.  These values are valid before golang
-	// commit ecccf07e7f9d which changed the format.  The are also valid
-	// after commit 82f48826c6c7 which changed the format again to mirror
-	// the original format.  Code in the init function updates these offsets
-	// as necessary.
-	offsetPtr    = uintptr(ptrSize)
-	offsetScalar = uintptr(0)
-	offsetFlag   = uintptr(ptrSize * 2)
-
-	// flagKindWidth and flagKindShift indicate various bits that the
-	// reflect package uses internally to track kind information.
-	//
-	// flagRO indicates whether or not the value field of a reflect.Value is
-	// read-only.
-	//
-	// flagIndir indicates whether the value field of a reflect.Value is
-	// the actual data or a pointer to the data.
-	//
-	// These values are valid before golang commit 90a7c3c86944 which
-	// changed their positions.  Code in the init function updates these
-	// flags as necessary.
-	flagKindWidth = uintptr(5)
-	flagKindShift = uintptr(flagKindWidth - 1)
-	flagRO        = uintptr(1 << 0)
-	flagIndir     = uintptr(1 << 1)
-)
-
-func init() {
-	// Older versions of reflect.Value stored small integers directly in the
-	// ptr field (which is named val in the older versions).  Versions
-	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
-	// scalar for this purpose which unfortunately came before the flag
-	// field, so the offset of the flag field is different for those
-	// versions.
-	//
-	// This code constructs a new reflect.Value from a known small integer
-	// and checks if the size of the reflect.Value struct indicates it has
-	// the scalar field. When it does, the offsets are updated accordingly.
-	vv := reflect.ValueOf(0xf00)
-	if unsafe.Sizeof(vv) == (ptrSize * 4) {
-		offsetScalar = ptrSize * 2
-		offsetFlag = ptrSize * 3
-	}
-
-	// Commit 90a7c3c86944 changed the flag positions such that the low
-	// order bits are the kind.  This code extracts the kind from the flags
-	// field and ensures it's the correct type.  When it's not, the flag
-	// order has been changed to the newer format, so the flags are updated
-	// accordingly.
-	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
-	upfv := *(*uintptr)(upf)
-	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
-	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
-		flagKindShift = 0
-		flagRO = 1 << 5
-		flagIndir = 1 << 6
-
-		// Commit adf9b30e5594 modified the flags to separate the
-		// flagRO flag into two bits which specifies whether or not the
-		// field is embedded.  This causes flagIndir to move over a bit
-		// and means that flagRO is the combination of either of the
-		// original flagRO bit and the new bit.
-		//
-		// This code detects the change by extracting what used to be
-		// the indirect bit to ensure it's set.  When it's not, the flag
-		// order has been changed to the newer format, so the flags are
-		// updated accordingly.
-		if upfv&flagIndir == 0 {
-			flagRO = 3 << 5
-			flagIndir = 1 << 7
-		}
-	}
-}
-
-// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
-// the typical safety restrictions preventing access to unaddressable and
-// unexported data.  It works by digging the raw pointer to the underlying
-// value out of the protected value and generating a new unprotected (unsafe)
-// reflect.Value to it.
-//
-// This allows us to check for implementations of the Stringer and error
-// interfaces to be used for pretty printing ordinarily unaddressable and
-// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
-	indirects := 1
-	vt := v.Type()
-	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
-	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
-	if rvf&flagIndir != 0 {
-		vt = reflect.PtrTo(v.Type())
-		indirects++
-	} else if offsetScalar != 0 {
-		// The value is in the scalar field when it's not one of the
-		// reference types.
-		switch vt.Kind() {
-		case reflect.Uintptr:
-		case reflect.Chan:
-		case reflect.Func:
-		case reflect.Map:
-		case reflect.Ptr:
-		case reflect.UnsafePointer:
-		default:
-			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
-				offsetScalar)
-		}
-	}
-
-	pv := reflect.NewAt(vt, upv)
-	rv = pv
-	for i := 0; i < indirects; i++ {
-		rv = rv.Elem()
-	}
-	return rv
-}

+ 0 - 38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -1,38 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is running on Google App Engine, compiled by GopherJS, or
-// "-tags safe" is added to the go build command line.  The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
-
-package spew
-
-import "reflect"
-
-const (
-	// UnsafeDisabled is a build-time constant which specifies whether or
-	// not access to the unsafe package is available.
-	UnsafeDisabled = true
-)
-
-// unsafeReflectValue typically converts the passed reflect.Value into a one
-// that bypasses the typical safety restrictions preventing access to
-// unaddressable and unexported data.  However, doing this relies on access to
-// the unsafe package.  This is a stub version which simply returns the passed
-// reflect.Value when the unsafe package is not available.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
-	return v
-}

+ 0 - 341
vendor/github.com/davecgh/go-spew/spew/common.go

@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-	"strconv"
-)
-
-// Some constants in the form of bytes to avoid string overhead.  This mirrors
-// the technique used in the fmt package.
-var (
-	panicBytes            = []byte("(PANIC=")
-	plusBytes             = []byte("+")
-	iBytes                = []byte("i")
-	trueBytes             = []byte("true")
-	falseBytes            = []byte("false")
-	interfaceBytes        = []byte("(interface {})")
-	commaNewlineBytes     = []byte(",\n")
-	newlineBytes          = []byte("\n")
-	openBraceBytes        = []byte("{")
-	openBraceNewlineBytes = []byte("{\n")
-	closeBraceBytes       = []byte("}")
-	asteriskBytes         = []byte("*")
-	colonBytes            = []byte(":")
-	colonSpaceBytes       = []byte(": ")
-	openParenBytes        = []byte("(")
-	closeParenBytes       = []byte(")")
-	spaceBytes            = []byte(" ")
-	pointerChainBytes     = []byte("->")
-	nilAngleBytes         = []byte("<nil>")
-	maxNewlineBytes       = []byte("<max depth reached>\n")
-	maxShortBytes         = []byte("<max>")
-	circularBytes         = []byte("<already shown>")
-	circularShortBytes    = []byte("<shown>")
-	invalidAngleBytes     = []byte("<invalid>")
-	openBracketBytes      = []byte("[")
-	closeBracketBytes     = []byte("]")
-	percentBytes          = []byte("%")
-	precisionBytes        = []byte(".")
-	openAngleBytes        = []byte("<")
-	closeAngleBytes       = []byte(">")
-	openMapBytes          = []byte("map[")
-	closeMapBytes         = []byte("]")
-	lenEqualsBytes        = []byte("len=")
-	capEqualsBytes        = []byte("cap=")
-)
-
-// hexDigits is used to map a decimal value to a hex digit.
-var hexDigits = "0123456789abcdef"
-
-// catchPanic handles any panics that might occur during the handleMethods
-// calls.
-func catchPanic(w io.Writer, v reflect.Value) {
-	if err := recover(); err != nil {
-		w.Write(panicBytes)
-		fmt.Fprintf(w, "%v", err)
-		w.Write(closeParenBytes)
-	}
-}
-
-// handleMethods attempts to call the Error and String methods on the underlying
-// type the passed reflect.Value represents and outputes the result to Writer w.
-//
-// It handles panics in any called methods by catching and displaying the error
-// as the formatted value.
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
-	// We need an interface to check if the type implements the error or
-	// Stringer interface.  However, the reflect package won't give us an
-	// interface on certain things like unexported struct fields in order
-	// to enforce visibility rules.  We use unsafe, when it's available,
-	// to bypass these restrictions since this package does not mutate the
-	// values.
-	if !v.CanInterface() {
-		if UnsafeDisabled {
-			return false
-		}
-
-		v = unsafeReflectValue(v)
-	}
-
-	// Choose whether or not to do error and Stringer interface lookups against
-	// the base type or a pointer to the base type depending on settings.
-	// Technically calling one of these methods with a pointer receiver can
-	// mutate the value, however, types which choose to satisify an error or
-	// Stringer interface with a pointer receiver should not be mutating their
-	// state inside these interface methods.
-	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
-		v = unsafeReflectValue(v)
-	}
-	if v.CanAddr() {
-		v = v.Addr()
-	}
-
-	// Is it an error or Stringer?
-	switch iface := v.Interface().(type) {
-	case error:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.Error()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-
-		w.Write([]byte(iface.Error()))
-		return true
-
-	case fmt.Stringer:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.String()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-		w.Write([]byte(iface.String()))
-		return true
-	}
-	return false
-}
-
-// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) {
-	if val {
-		w.Write(trueBytes)
-	} else {
-		w.Write(falseBytes)
-	}
-}
-
-// printInt outputs a signed integer value to Writer w.
-func printInt(w io.Writer, val int64, base int) {
-	w.Write([]byte(strconv.FormatInt(val, base)))
-}
-
-// printUint outputs an unsigned integer value to Writer w.
-func printUint(w io.Writer, val uint64, base int) {
-	w.Write([]byte(strconv.FormatUint(val, base)))
-}
-
-// printFloat outputs a floating point value using the specified precision,
-// which is expected to be 32 or 64bit, to Writer w.
-func printFloat(w io.Writer, val float64, precision int) {
-	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
-}
-
-// printComplex outputs a complex value using the specified float precision
-// for the real and imaginary parts to Writer w.
-func printComplex(w io.Writer, c complex128, floatPrecision int) {
-	r := real(c)
-	w.Write(openParenBytes)
-	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
-	i := imag(c)
-	if i >= 0 {
-		w.Write(plusBytes)
-	}
-	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
-	w.Write(iBytes)
-	w.Write(closeParenBytes)
-}
-
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
-// prefix to Writer w.
-func printHexPtr(w io.Writer, p uintptr) {
-	// Null pointer.
-	num := uint64(p)
-	if num == 0 {
-		w.Write(nilAngleBytes)
-		return
-	}
-
-	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
-	buf := make([]byte, 18)
-
-	// It's simpler to construct the hex string right to left.
-	base := uint64(16)
-	i := len(buf) - 1
-	for num >= base {
-		buf[i] = hexDigits[num%base]
-		num /= base
-		i--
-	}
-	buf[i] = hexDigits[num]
-
-	// Add '0x' prefix.
-	i--
-	buf[i] = 'x'
-	i--
-	buf[i] = '0'
-
-	// Strip unused leading bytes.
-	buf = buf[i:]
-	w.Write(buf)
-}
-
-// valuesSorter implements sort.Interface to allow a slice of reflect.Value
-// elements to be sorted.
-type valuesSorter struct {
-	values  []reflect.Value
-	strings []string // either nil or same len and values
-	cs      *ConfigState
-}
-
-// newValuesSorter initializes a valuesSorter instance, which holds a set of
-// surrogate keys on which the data should be sorted.  It uses flags in
-// ConfigState to decide if and how to populate those surrogate keys.
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
-	vs := &valuesSorter{values: values, cs: cs}
-	if canSortSimply(vs.values[0].Kind()) {
-		return vs
-	}
-	if !cs.DisableMethods {
-		vs.strings = make([]string, len(values))
-		for i := range vs.values {
-			b := bytes.Buffer{}
-			if !handleMethods(cs, &b, vs.values[i]) {
-				vs.strings = nil
-				break
-			}
-			vs.strings[i] = b.String()
-		}
-	}
-	if vs.strings == nil && cs.SpewKeys {
-		vs.strings = make([]string, len(values))
-		for i := range vs.values {
-			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
-		}
-	}
-	return vs
-}
-
-// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
-// directly, or whether it should be considered for sorting by surrogate keys
-// (if the ConfigState allows it).
-func canSortSimply(kind reflect.Kind) bool {
-	// This switch parallels valueSortLess, except for the default case.
-	switch kind {
-	case reflect.Bool:
-		return true
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		return true
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		return true
-	case reflect.Float32, reflect.Float64:
-		return true
-	case reflect.String:
-		return true
-	case reflect.Uintptr:
-		return true
-	case reflect.Array:
-		return true
-	}
-	return false
-}
-
-// Len returns the number of values in the slice.  It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Len() int {
-	return len(s.values)
-}
-
-// Swap swaps the values at the passed indices.  It is part of the
-// sort.Interface implementation.
-func (s *valuesSorter) Swap(i, j int) {
-	s.values[i], s.values[j] = s.values[j], s.values[i]
-	if s.strings != nil {
-		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
-	}
-}
-
-// valueSortLess returns whether the first value should sort before the second
-// value.  It is used by valueSorter.Less as part of the sort.Interface
-// implementation.
-func valueSortLess(a, b reflect.Value) bool {
-	switch a.Kind() {
-	case reflect.Bool:
-		return !a.Bool() && b.Bool()
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		return a.Int() < b.Int()
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		return a.Uint() < b.Uint()
-	case reflect.Float32, reflect.Float64:
-		return a.Float() < b.Float()
-	case reflect.String:
-		return a.String() < b.String()
-	case reflect.Uintptr:
-		return a.Uint() < b.Uint()
-	case reflect.Array:
-		// Compare the contents of both arrays.
-		l := a.Len()
-		for i := 0; i < l; i++ {
-			av := a.Index(i)
-			bv := b.Index(i)
-			if av.Interface() == bv.Interface() {
-				continue
-			}
-			return valueSortLess(av, bv)
-		}
-	}
-	return a.String() < b.String()
-}
-
-// Less returns whether the value at index i should sort before the
-// value at index j.  It is part of the sort.Interface implementation.
-func (s *valuesSorter) Less(i, j int) bool {
-	if s.strings == nil {
-		return valueSortLess(s.values[i], s.values[j])
-	}
-	return s.strings[i] < s.strings[j]
-}
-
-// sortValues is a sort function that handles both native types and any type that
-// can be converted to error or Stringer.  Other inputs are sorted according to
-// their Value.String() value to ensure display stability.
-func sortValues(values []reflect.Value, cs *ConfigState) {
-	if len(values) == 0 {
-		return
-	}
-	sort.Sort(newValuesSorter(values, cs))
-}

+ 0 - 306
vendor/github.com/davecgh/go-spew/spew/config.go

@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-)
-
-// ConfigState houses the configuration options used by spew to format and
-// display values.  There is a global instance, Config, that is used to control
-// all top-level Formatter and Dump functionality.  Each ConfigState instance
-// provides methods equivalent to the top-level functions.
-//
-// The zero value for ConfigState provides no indentation.  You would typically
-// want to set it to a space or a tab.
-//
-// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
-// with default settings.  See the documentation of NewDefaultConfig for default
-// values.
-type ConfigState struct {
-	// Indent specifies the string to use for each indentation level.  The
-	// global config instance that all top-level functions use set this to a
-	// single space by default.  If you would like more indentation, you might
-	// set this to a tab with "\t" or perhaps two spaces with "  ".
-	Indent string
-
-	// MaxDepth controls the maximum number of levels to descend into nested
-	// data structures.  The default, 0, means there is no limit.
-	//
-	// NOTE: Circular data structures are properly detected, so it is not
-	// necessary to set this value unless you specifically want to limit deeply
-	// nested data structures.
-	MaxDepth int
-
-	// DisableMethods specifies whether or not error and Stringer interfaces are
-	// invoked for types that implement them.
-	DisableMethods bool
-
-	// DisablePointerMethods specifies whether or not to check for and invoke
-	// error and Stringer interfaces on types which only accept a pointer
-	// receiver when the current type is not a pointer.
-	//
-	// NOTE: This might be an unsafe action since calling one of these methods
-	// with a pointer receiver could technically mutate the value, however,
-	// in practice, types which choose to satisify an error or Stringer
-	// interface with a pointer receiver should not be mutating their state
-	// inside these interface methods.  As a result, this option relies on
-	// access to the unsafe package, so it will not have any effect when
-	// running in environments without access to the unsafe package such as
-	// Google App Engine or with the "safe" build tag specified.
-	DisablePointerMethods bool
-
-	// DisablePointerAddresses specifies whether to disable the printing of
-	// pointer addresses. This is useful when diffing data structures in tests.
-	DisablePointerAddresses bool
-
-	// DisableCapacities specifies whether to disable the printing of capacities
-	// for arrays, slices, maps and channels. This is useful when diffing
-	// data structures in tests.
-	DisableCapacities bool
-
-	// ContinueOnMethod specifies whether or not recursion should continue once
-	// a custom error or Stringer interface is invoked.  The default, false,
-	// means it will print the results of invoking the custom error or Stringer
-	// interface and return immediately instead of continuing to recurse into
-	// the internals of the data type.
-	//
-	// NOTE: This flag does not have any effect if method invocation is disabled
-	// via the DisableMethods or DisablePointerMethods options.
-	ContinueOnMethod bool
-
-	// SortKeys specifies map keys should be sorted before being printed. Use
-	// this to have a more deterministic, diffable output.  Note that only
-	// native types (bool, int, uint, floats, uintptr and string) and types
-	// that support the error or Stringer interfaces (if methods are
-	// enabled) are supported, with other types sorted according to the
-	// reflect.Value.String() output which guarantees display stability.
-	SortKeys bool
-
-	// SpewKeys specifies that, as a last resort attempt, map keys should
-	// be spewed to strings and sorted by those strings.  This is only
-	// considered if SortKeys is true.
-	SpewKeys bool
-}
-
-// Config is the active configuration of the top-level functions.
-// The configuration can be changed by modifying the contents of spew.Config.
-var Config = ConfigState{Indent: " "}
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the formatted string as a value that satisfies error.  See NewFormatter
-// for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
-	return fmt.Errorf(format, c.convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprint(w, c.convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
-	return fmt.Fprintf(w, format, c.convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a Formatter interface returned by c.NewFormatter.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprintln(w, c.convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
-	return fmt.Print(c.convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
-	return fmt.Printf(format, c.convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
-	return fmt.Println(c.convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprint(a ...interface{}) string {
-	return fmt.Sprint(c.convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter.  It returns
-// the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, c.convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a Formatter interface returned by c.NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintln(a ...interface{}) string {
-	return fmt.Sprintln(c.convertArgs(a)...)
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface.  As a result, it integrates cleanly with standard fmt package
-printing functions.  The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly.  It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-c.Printf, c.Println, or c.Printf.
-*/
-func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
-	return newFormatter(c, v)
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w.  It formats
-// exactly the same as Dump.
-func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
-	fdump(c, w, a...)
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value.  It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by modifying the public members
-of c.  See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func (c *ConfigState) Dump(a ...interface{}) {
-	fdump(c, os.Stdout, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func (c *ConfigState) Sdump(a ...interface{}) string {
-	var buf bytes.Buffer
-	fdump(c, &buf, a...)
-	return buf.String()
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a spew Formatter interface using
-// the ConfigState associated with s.
-func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
-	formatters = make([]interface{}, len(args))
-	for index, arg := range args {
-		formatters[index] = newFormatter(c, arg)
-	}
-	return formatters
-}
-
-// NewDefaultConfig returns a ConfigState with the following default settings.
-//
-// 	Indent: " "
-// 	MaxDepth: 0
-// 	DisableMethods: false
-// 	DisablePointerMethods: false
-// 	ContinueOnMethod: false
-// 	SortKeys: false
-func NewDefaultConfig() *ConfigState {
-	return &ConfigState{Indent: " "}
-}

+ 0 - 211
vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -1,211 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
-Package spew implements a deep pretty printer for Go data structures to aid in
-debugging.
-
-A quick overview of the additional features spew provides over the built-in
-printing facilities for Go data types are as follows:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output (only when using
-	  Dump style)
-
-There are two different approaches spew allows for dumping Go data structures:
-
-	* Dump style which prints with newlines, customizable indentation,
-	  and additional debug information such as types and all pointer addresses
-	  used to indirect to the final value
-	* A custom Formatter interface that integrates cleanly with the standard fmt
-	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
-	  similar to the default %v while providing the additional functionality
-	  outlined above and passing unsupported format verbs such as %x and %q
-	  along to fmt
-
-Quick Start
-
-This section demonstrates how to quickly get started with spew.  See the
-sections below for further details on formatting and configuration options.
-
-To dump a variable with full newlines, indentation, type, and pointer
-information use Dump, Fdump, or Sdump:
-	spew.Dump(myVar1, myVar2, ...)
-	spew.Fdump(someWriter, myVar1, myVar2, ...)
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Alternatively, if you would prefer to use format strings with a compacted inline
-printing style, use the convenience wrappers Printf, Fprintf, etc with
-%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
-%#+v (adds types and pointer addresses):
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-Configuration Options
-
-Configuration of spew is handled by fields in the ConfigState type.  For
-convenience, all of the top-level functions use a global state available
-via the spew.Config global.
-
-It is also possible to create a ConfigState instance that provides methods
-equivalent to the top-level functions.  This allows concurrent configuration
-options.  See the ConfigState documentation for more details.
-
-The following configuration options are available:
-	* Indent
-		String to use for each indentation level for Dump functions.
-		It is a single space by default.  A popular alternative is "\t".
-
-	* MaxDepth
-		Maximum number of levels to descend into nested data structures.
-		There is no limit by default.
-
-	* DisableMethods
-		Disables invocation of error and Stringer interface methods.
-		Method invocation is enabled by default.
-
-	* DisablePointerMethods
-		Disables invocation of error and Stringer interface methods on types
-		which only accept pointer receivers from non-pointer variables.
-		Pointer method invocation is enabled by default.
-
-	* DisablePointerAddresses
-		DisablePointerAddresses specifies whether to disable the printing of
-		pointer addresses. This is useful when diffing data structures in tests.
-
-	* DisableCapacities
-		DisableCapacities specifies whether to disable the printing of
-		capacities for arrays, slices, maps and channels. This is useful when
-		diffing data structures in tests.
-
-	* ContinueOnMethod
-		Enables recursion into types after invoking error and Stringer interface
-		methods. Recursion after method invocation is disabled by default.
-
-	* SortKeys
-		Specifies map keys should be sorted before being printed. Use
-		this to have a more deterministic, diffable output.  Note that
-		only native types (bool, int, uint, floats, uintptr and string)
-		and types which implement error or Stringer interfaces are
-		supported with other types sorted according to the
-		reflect.Value.String() output which guarantees display
-		stability.  Natural map order is used by default.
-
-	* SpewKeys
-		Specifies that, as a last resort attempt, map keys should be
-		spewed to strings and sorted by those strings.  This is only
-		considered if SortKeys is true.
-
-Dump Usage
-
-Simply call spew.Dump with a list of variables you want to dump:
-
-	spew.Dump(myVar1, myVar2, ...)
-
-You may also call spew.Fdump if you would prefer to output to an arbitrary
-io.Writer.  For example, to dump to standard error:
-
-	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
-
-A third option is to call spew.Sdump to get the formatted output as a string:
-
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Sample Dump Output
-
-See the Dump example for details on the setup of the types and variables being
-shown here.
-
-	(main.Foo) {
-	 unexportedField: (*main.Bar)(0xf84002e210)({
-	  flag: (main.Flag) flagTwo,
-	  data: (uintptr) <nil>
-	 }),
-	 ExportedField: (map[interface {}]interface {}) (len=1) {
-	  (string) (len=3) "one": (bool) true
-	 }
-	}
-
-Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
-command as shown.
-	([]uint8) (len=32 cap=32) {
-	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
-	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
-	 00000020  31 32                                             |12|
-	}
-
-Custom Formatter
-
-Spew provides a custom formatter that implements the fmt.Formatter interface
-so that it integrates cleanly with standard fmt package printing functions. The
-formatter is useful for inline printing of smaller data types similar to the
-standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Custom Formatter Usage
-
-The simplest way to make use of the spew custom formatter is to call one of the
-convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
-functions have syntax you are most likely already familiar with:
-
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Println(myVar, myVar2)
-	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list of convenience functions.
-
-Sample Formatter Output
-
-Double pointer to a uint8:
-	  %v: <**>5
-	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
-	 %#v: (**uint8)5
-	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
-	  %v: <*>{1 <*><shown>}
-	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
-	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
-	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-
-See the Printf example for details on the setup of variables being shown
-here.
-
-Errors
-
-Since it is possible for custom Stringer/error interfaces to panic, spew
-detects them and handles them internally by printing the panic information
-inline with the output.  Since spew is intended to provide deep pretty printing
-capabilities on structures, it intentionally does not return any errors.
-*/
-package spew
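
The doc.go removed above is the package overview for go-spew. As a quick reference for the API it documents (Dump, the ConfigState options, and the custom formatter), here is a minimal sketch, assuming the upstream github.com/davecgh/go-spew/spew import path rather than the vendored copy:

	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		type Bar struct{ Flag int }
		type Foo struct {
			Bar *Bar
			M   map[string]bool
		}
		v := Foo{Bar: &Bar{Flag: 2}, M: map[string]bool{"one": true}}

		// Package-level Dump uses the exported global spew.Config.
		spew.Dump(v)

		// A ConfigState carries its own options, equivalent to the
		// top-level functions, so callers can tune Indent, MaxDepth,
		// SortKeys and friends without touching the global config.
		cs := spew.ConfigState{Indent: "\t", MaxDepth: 3, SortKeys: true}
		cs.Fdump(os.Stderr, v)
	}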

+ 0 - 509
vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-var (
-	// uint8Type is a reflect.Type representing a uint8.  It is used to
-	// convert cgo types to uint8 slices for hexdumping.
-	uint8Type = reflect.TypeOf(uint8(0))
-
-	// cCharRE is a regular expression that matches a cgo char.
-	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
-
-	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
-	// char.  It is used to detect unsigned character arrays to hexdump
-	// them.
-	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
-
-	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
-	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
-)
-
-// dumpState contains information about the state of a dump operation.
-type dumpState struct {
-	w                io.Writer
-	depth            int
-	pointers         map[uintptr]int
-	ignoreNextType   bool
-	ignoreNextIndent bool
-	cs               *ConfigState
-}
-
-// indent performs indentation according to the depth level and cs.Indent
-// option.
-func (d *dumpState) indent() {
-	if d.ignoreNextIndent {
-		d.ignoreNextIndent = false
-		return
-	}
-	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface && !v.IsNil() {
-		v = v.Elem()
-	}
-	return v
-}
-
-// dumpPtr handles formatting of pointers by indirecting them as necessary.
-func (d *dumpState) dumpPtr(v reflect.Value) {
-	// Remove pointers at or below the current depth from map used to detect
-	// circular refs.
-	for k, depth := range d.pointers {
-		if depth >= d.depth {
-			delete(d.pointers, k)
-		}
-	}
-
-	// Keep list of all dereferenced pointers to show later.
-	pointerChain := make([]uintptr, 0)
-
-	// Figure out how many levels of indirection there are by dereferencing
-	// pointers and unpacking interfaces down the chain while detecting circular
-	// references.
-	nilFound := false
-	cycleFound := false
-	indirects := 0
-	ve := v
-	for ve.Kind() == reflect.Ptr {
-		if ve.IsNil() {
-			nilFound = true
-			break
-		}
-		indirects++
-		addr := ve.Pointer()
-		pointerChain = append(pointerChain, addr)
-		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
-			cycleFound = true
-			indirects--
-			break
-		}
-		d.pointers[addr] = d.depth
-
-		ve = ve.Elem()
-		if ve.Kind() == reflect.Interface {
-			if ve.IsNil() {
-				nilFound = true
-				break
-			}
-			ve = ve.Elem()
-		}
-	}
-
-	// Display type information.
-	d.w.Write(openParenBytes)
-	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
-	d.w.Write([]byte(ve.Type().String()))
-	d.w.Write(closeParenBytes)
-
-	// Display pointer information.
-	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
-		d.w.Write(openParenBytes)
-		for i, addr := range pointerChain {
-			if i > 0 {
-				d.w.Write(pointerChainBytes)
-			}
-			printHexPtr(d.w, addr)
-		}
-		d.w.Write(closeParenBytes)
-	}
-
-	// Display dereferenced value.
-	d.w.Write(openParenBytes)
-	switch {
-	case nilFound == true:
-		d.w.Write(nilAngleBytes)
-
-	case cycleFound == true:
-		d.w.Write(circularBytes)
-
-	default:
-		d.ignoreNextType = true
-		d.dump(ve)
-	}
-	d.w.Write(closeParenBytes)
-}
-
-// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
-// reflection) arrays and slices are dumped in hexdump -C fashion.
-func (d *dumpState) dumpSlice(v reflect.Value) {
-	// Determine whether this type should be hex dumped or not.  Also,
-	// for types which should be hexdumped, try to use the underlying data
-	// first, then fall back to trying to convert them to a uint8 slice.
-	var buf []uint8
-	doConvert := false
-	doHexDump := false
-	numEntries := v.Len()
-	if numEntries > 0 {
-		vt := v.Index(0).Type()
-		vts := vt.String()
-		switch {
-		// C types that need to be converted.
-		case cCharRE.MatchString(vts):
-			fallthrough
-		case cUnsignedCharRE.MatchString(vts):
-			fallthrough
-		case cUint8tCharRE.MatchString(vts):
-			doConvert = true
-
-		// Try to use existing uint8 slices and fall back to converting
-		// and copying if that fails.
-		case vt.Kind() == reflect.Uint8:
-			// We need an addressable interface to convert the type
-			// to a byte slice.  However, the reflect package won't
-			// give us an interface on certain things like
-			// unexported struct fields in order to enforce
-			// visibility rules.  We use unsafe, when available, to
-			// bypass these restrictions since this package does not
-			// mutate the values.
-			vs := v
-			if !vs.CanInterface() || !vs.CanAddr() {
-				vs = unsafeReflectValue(vs)
-			}
-			if !UnsafeDisabled {
-				vs = vs.Slice(0, numEntries)
-
-				// Use the existing uint8 slice if it can be
-				// type asserted.
-				iface := vs.Interface()
-				if slice, ok := iface.([]uint8); ok {
-					buf = slice
-					doHexDump = true
-					break
-				}
-			}
-
-			// The underlying data needs to be converted if it can't
-			// be type asserted to a uint8 slice.
-			doConvert = true
-		}
-
-		// Copy and convert the underlying type if needed.
-		if doConvert && vt.ConvertibleTo(uint8Type) {
-			// Convert and copy each element into a uint8 byte
-			// slice.
-			buf = make([]uint8, numEntries)
-			for i := 0; i < numEntries; i++ {
-				vv := v.Index(i)
-				buf[i] = uint8(vv.Convert(uint8Type).Uint())
-			}
-			doHexDump = true
-		}
-	}
-
-	// Hexdump the entire slice as needed.
-	if doHexDump {
-		indent := strings.Repeat(d.cs.Indent, d.depth)
-		str := indent + hex.Dump(buf)
-		str = strings.Replace(str, "\n", "\n"+indent, -1)
-		str = strings.TrimRight(str, d.cs.Indent)
-		d.w.Write([]byte(str))
-		return
-	}
-
-	// Recursively call dump for each item.
-	for i := 0; i < numEntries; i++ {
-		d.dump(d.unpackValue(v.Index(i)))
-		if i < (numEntries - 1) {
-			d.w.Write(commaNewlineBytes)
-		} else {
-			d.w.Write(newlineBytes)
-		}
-	}
-}
-
-// dump is the main workhorse for dumping a value.  It uses the passed reflect
-// value to figure out what kind of object we are dealing with and formats it
-// appropriately.  It is a recursive function, however circular data structures
-// are detected and handled properly.
-func (d *dumpState) dump(v reflect.Value) {
-	// Handle invalid reflect values immediately.
-	kind := v.Kind()
-	if kind == reflect.Invalid {
-		d.w.Write(invalidAngleBytes)
-		return
-	}
-
-	// Handle pointers specially.
-	if kind == reflect.Ptr {
-		d.indent()
-		d.dumpPtr(v)
-		return
-	}
-
-	// Print type information unless already handled elsewhere.
-	if !d.ignoreNextType {
-		d.indent()
-		d.w.Write(openParenBytes)
-		d.w.Write([]byte(v.Type().String()))
-		d.w.Write(closeParenBytes)
-		d.w.Write(spaceBytes)
-	}
-	d.ignoreNextType = false
-
-	// Display length and capacity if the built-in len and cap functions
-	// work with the value's kind and the len/cap itself is non-zero.
-	valueLen, valueCap := 0, 0
-	switch v.Kind() {
-	case reflect.Array, reflect.Slice, reflect.Chan:
-		valueLen, valueCap = v.Len(), v.Cap()
-	case reflect.Map, reflect.String:
-		valueLen = v.Len()
-	}
-	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
-		d.w.Write(openParenBytes)
-		if valueLen != 0 {
-			d.w.Write(lenEqualsBytes)
-			printInt(d.w, int64(valueLen), 10)
-		}
-		if !d.cs.DisableCapacities && valueCap != 0 {
-			if valueLen != 0 {
-				d.w.Write(spaceBytes)
-			}
-			d.w.Write(capEqualsBytes)
-			printInt(d.w, int64(valueCap), 10)
-		}
-		d.w.Write(closeParenBytes)
-		d.w.Write(spaceBytes)
-	}
-
-	// Call Stringer/error interfaces if they exist and the handle methods flag
-	// is enabled
-	if !d.cs.DisableMethods {
-		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
-			if handled := handleMethods(d.cs, d.w, v); handled {
-				return
-			}
-		}
-	}
-
-	switch kind {
-	case reflect.Invalid:
-		// Do nothing.  We should never get here since invalid has already
-		// been handled above.
-
-	case reflect.Bool:
-		printBool(d.w, v.Bool())
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		printInt(d.w, v.Int(), 10)
-
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		printUint(d.w, v.Uint(), 10)
-
-	case reflect.Float32:
-		printFloat(d.w, v.Float(), 32)
-
-	case reflect.Float64:
-		printFloat(d.w, v.Float(), 64)
-
-	case reflect.Complex64:
-		printComplex(d.w, v.Complex(), 32)
-
-	case reflect.Complex128:
-		printComplex(d.w, v.Complex(), 64)
-
-	case reflect.Slice:
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-			break
-		}
-		fallthrough
-
-	case reflect.Array:
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			d.dumpSlice(v)
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.String:
-		d.w.Write([]byte(strconv.Quote(v.String())))
-
-	case reflect.Interface:
-		// The only time we should get here is for nil interfaces due to
-		// unpackValue calls.
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-		}
-
-	case reflect.Ptr:
-		// Do nothing.  We should never get here since pointers have already
-		// been handled above.
-
-	case reflect.Map:
-		// nil maps should be indicated as different than empty maps
-		if v.IsNil() {
-			d.w.Write(nilAngleBytes)
-			break
-		}
-
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			numEntries := v.Len()
-			keys := v.MapKeys()
-			if d.cs.SortKeys {
-				sortValues(keys, d.cs)
-			}
-			for i, key := range keys {
-				d.dump(d.unpackValue(key))
-				d.w.Write(colonSpaceBytes)
-				d.ignoreNextIndent = true
-				d.dump(d.unpackValue(v.MapIndex(key)))
-				if i < (numEntries - 1) {
-					d.w.Write(commaNewlineBytes)
-				} else {
-					d.w.Write(newlineBytes)
-				}
-			}
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.Struct:
-		d.w.Write(openBraceNewlineBytes)
-		d.depth++
-		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
-			d.indent()
-			d.w.Write(maxNewlineBytes)
-		} else {
-			vt := v.Type()
-			numFields := v.NumField()
-			for i := 0; i < numFields; i++ {
-				d.indent()
-				vtf := vt.Field(i)
-				d.w.Write([]byte(vtf.Name))
-				d.w.Write(colonSpaceBytes)
-				d.ignoreNextIndent = true
-				d.dump(d.unpackValue(v.Field(i)))
-				if i < (numFields - 1) {
-					d.w.Write(commaNewlineBytes)
-				} else {
-					d.w.Write(newlineBytes)
-				}
-			}
-		}
-		d.depth--
-		d.indent()
-		d.w.Write(closeBraceBytes)
-
-	case reflect.Uintptr:
-		printHexPtr(d.w, uintptr(v.Uint()))
-
-	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		printHexPtr(d.w, v.Pointer())
-
-	// There were not any other types at the time this code was written, but
-	// fall back to letting the default fmt package handle it in case any new
-	// types are added.
-	default:
-		if v.CanInterface() {
-			fmt.Fprintf(d.w, "%v", v.Interface())
-		} else {
-			fmt.Fprintf(d.w, "%v", v.String())
-		}
-	}
-}
-
-// fdump is a helper function to consolidate the logic from the various public
-// methods which take varying writers and config states.
-func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
-	for _, arg := range a {
-		if arg == nil {
-			w.Write(interfaceBytes)
-			w.Write(spaceBytes)
-			w.Write(nilAngleBytes)
-			w.Write(newlineBytes)
-			continue
-		}
-
-		d := dumpState{w: w, cs: cs}
-		d.pointers = make(map[uintptr]int)
-		d.dump(reflect.ValueOf(arg))
-		d.w.Write(newlineBytes)
-	}
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w.  It formats
-// exactly the same as Dump.
-func Fdump(w io.Writer, a ...interface{}) {
-	fdump(&Config, w, a...)
-}
-
-// Sdump returns a string with the passed arguments formatted exactly the same
-// as Dump.
-func Sdump(a ...interface{}) string {
-	var buf bytes.Buffer
-	fdump(&Config, &buf, a...)
-	return buf.String()
-}
-
-/*
-Dump displays the passed parameters to standard out with newlines, customizable
-indentation, and additional debug information such as complete types and all
-pointer addresses used to indirect to the final value.  It provides the
-following features over the built-in printing facilities provided by the fmt
-package:
-
-	* Pointers are dereferenced and followed
-	* Circular data structures are detected and handled properly
-	* Custom Stringer/error interfaces are optionally invoked, including
-	  on unexported types
-	* Custom types which only implement the Stringer/error interfaces via
-	  a pointer receiver are optionally invoked when passing non-pointer
-	  variables
-	* Byte arrays and slices are dumped like the hexdump -C command which
-	  includes offsets, byte values in hex, and ASCII output
-
-The configuration options are controlled by an exported package global,
-spew.Config.  See ConfigState for options documentation.
-
-See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
-get the formatted result as a string.
-*/
-func Dump(a ...interface{}) {
-	fdump(&Config, os.Stdout, a...)
-}
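
dump.go above provides the Dump, Fdump and Sdump entry points. A short sketch of the writer- and string-returning variants, per the doc comments (the buffer below is only an illustrative stand-in for an arbitrary io.Writer):

	package example

	import (
		"bytes"

		"github.com/davecgh/go-spew/spew"
	)

	// describe returns the same text Dump would print to stdout.
	func describe(v interface{}) string {
		// Fdump writes to any io.Writer.
		var buf bytes.Buffer
		spew.Fdump(&buf, v)

		// Sdump returns the formatted output directly as a string.
		_ = buf.String() // identical to the Sdump result below
		return spew.Sdump(v)
	}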

+ 0 - 419
vendor/github.com/davecgh/go-spew/spew/format.go

@@ -1,419 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// supportedFlags is a list of all the character flags supported by fmt package.
-const supportedFlags = "0-+# "
-
-// formatState implements the fmt.Formatter interface and contains information
-// about the state of a formatting operation.  The NewFormatter function can
-// be used to get a new Formatter which can be used directly as arguments
-// in standard fmt package printing calls.
-type formatState struct {
-	value          interface{}
-	fs             fmt.State
-	depth          int
-	pointers       map[uintptr]int
-	ignoreNextType bool
-	cs             *ConfigState
-}
-
-// buildDefaultFormat recreates the original format string without precision
-// and width information to pass in to fmt.Sprintf in the case of an
-// unrecognized type.  Unless new types are added to the language, this
-// function won't ever be called.
-func (f *formatState) buildDefaultFormat() (format string) {
-	buf := bytes.NewBuffer(percentBytes)
-
-	for _, flag := range supportedFlags {
-		if f.fs.Flag(int(flag)) {
-			buf.WriteRune(flag)
-		}
-	}
-
-	buf.WriteRune('v')
-
-	format = buf.String()
-	return format
-}
-
-// constructOrigFormat recreates the original format string including precision
-// and width information to pass along to the standard fmt package.  This allows
-// automatic deferral of all format strings this package doesn't support.
-func (f *formatState) constructOrigFormat(verb rune) (format string) {
-	buf := bytes.NewBuffer(percentBytes)
-
-	for _, flag := range supportedFlags {
-		if f.fs.Flag(int(flag)) {
-			buf.WriteRune(flag)
-		}
-	}
-
-	if width, ok := f.fs.Width(); ok {
-		buf.WriteString(strconv.Itoa(width))
-	}
-
-	if precision, ok := f.fs.Precision(); ok {
-		buf.Write(precisionBytes)
-		buf.WriteString(strconv.Itoa(precision))
-	}
-
-	buf.WriteRune(verb)
-
-	format = buf.String()
-	return format
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible and
-// ensures that types for values which have been unpacked from an interface
-// are displayed when the show types flag is also set.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
-	if v.Kind() == reflect.Interface {
-		f.ignoreNextType = false
-		if !v.IsNil() {
-			v = v.Elem()
-		}
-	}
-	return v
-}
-
-// formatPtr handles formatting of pointers by indirecting them as necessary.
-func (f *formatState) formatPtr(v reflect.Value) {
-	// Display nil if top level pointer is nil.
-	showTypes := f.fs.Flag('#')
-	if v.IsNil() && (!showTypes || f.ignoreNextType) {
-		f.fs.Write(nilAngleBytes)
-		return
-	}
-
-	// Remove pointers at or below the current depth from map used to detect
-	// circular refs.
-	for k, depth := range f.pointers {
-		if depth >= f.depth {
-			delete(f.pointers, k)
-		}
-	}
-
-	// Keep list of all dereferenced pointers to possibly show later.
-	pointerChain := make([]uintptr, 0)
-
-	// Figure out how many levels of indirection there are by dereferencing
-	// pointers and unpacking interfaces down the chain while detecting circular
-	// references.
-	nilFound := false
-	cycleFound := false
-	indirects := 0
-	ve := v
-	for ve.Kind() == reflect.Ptr {
-		if ve.IsNil() {
-			nilFound = true
-			break
-		}
-		indirects++
-		addr := ve.Pointer()
-		pointerChain = append(pointerChain, addr)
-		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
-			cycleFound = true
-			indirects--
-			break
-		}
-		f.pointers[addr] = f.depth
-
-		ve = ve.Elem()
-		if ve.Kind() == reflect.Interface {
-			if ve.IsNil() {
-				nilFound = true
-				break
-			}
-			ve = ve.Elem()
-		}
-	}
-
-	// Display type or indirection level depending on flags.
-	if showTypes && !f.ignoreNextType {
-		f.fs.Write(openParenBytes)
-		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
-		f.fs.Write([]byte(ve.Type().String()))
-		f.fs.Write(closeParenBytes)
-	} else {
-		if nilFound || cycleFound {
-			indirects += strings.Count(ve.Type().String(), "*")
-		}
-		f.fs.Write(openAngleBytes)
-		f.fs.Write([]byte(strings.Repeat("*", indirects)))
-		f.fs.Write(closeAngleBytes)
-	}
-
-	// Display pointer information depending on flags.
-	if f.fs.Flag('+') && (len(pointerChain) > 0) {
-		f.fs.Write(openParenBytes)
-		for i, addr := range pointerChain {
-			if i > 0 {
-				f.fs.Write(pointerChainBytes)
-			}
-			printHexPtr(f.fs, addr)
-		}
-		f.fs.Write(closeParenBytes)
-	}
-
-	// Display dereferenced value.
-	switch {
-	case nilFound == true:
-		f.fs.Write(nilAngleBytes)
-
-	case cycleFound == true:
-		f.fs.Write(circularShortBytes)
-
-	default:
-		f.ignoreNextType = true
-		f.format(ve)
-	}
-}
-
-// format is the main workhorse for providing the Formatter interface.  It
-// uses the passed reflect value to figure out what kind of object we are
-// dealing with and formats it appropriately.  It is a recursive function,
-// however circular data structures are detected and handled properly.
-func (f *formatState) format(v reflect.Value) {
-	// Handle invalid reflect values immediately.
-	kind := v.Kind()
-	if kind == reflect.Invalid {
-		f.fs.Write(invalidAngleBytes)
-		return
-	}
-
-	// Handle pointers specially.
-	if kind == reflect.Ptr {
-		f.formatPtr(v)
-		return
-	}
-
-	// Print type information unless already handled elsewhere.
-	if !f.ignoreNextType && f.fs.Flag('#') {
-		f.fs.Write(openParenBytes)
-		f.fs.Write([]byte(v.Type().String()))
-		f.fs.Write(closeParenBytes)
-	}
-	f.ignoreNextType = false
-
-	// Call Stringer/error interfaces if they exist and the handle methods
-	// flag is enabled.
-	if !f.cs.DisableMethods {
-		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
-			if handled := handleMethods(f.cs, f.fs, v); handled {
-				return
-			}
-		}
-	}
-
-	switch kind {
-	case reflect.Invalid:
-		// Do nothing.  We should never get here since invalid has already
-		// been handled above.
-
-	case reflect.Bool:
-		printBool(f.fs, v.Bool())
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-		printInt(f.fs, v.Int(), 10)
-
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
-		printUint(f.fs, v.Uint(), 10)
-
-	case reflect.Float32:
-		printFloat(f.fs, v.Float(), 32)
-
-	case reflect.Float64:
-		printFloat(f.fs, v.Float(), 64)
-
-	case reflect.Complex64:
-		printComplex(f.fs, v.Complex(), 32)
-
-	case reflect.Complex128:
-		printComplex(f.fs, v.Complex(), 64)
-
-	case reflect.Slice:
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-			break
-		}
-		fallthrough
-
-	case reflect.Array:
-		f.fs.Write(openBracketBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			numEntries := v.Len()
-			for i := 0; i < numEntries; i++ {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				f.ignoreNextType = true
-				f.format(f.unpackValue(v.Index(i)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeBracketBytes)
-
-	case reflect.String:
-		f.fs.Write([]byte(v.String()))
-
-	case reflect.Interface:
-		// The only time we should get here is for nil interfaces due to
-		// unpackValue calls.
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-		}
-
-	case reflect.Ptr:
-		// Do nothing.  We should never get here since pointers have already
-		// been handled above.
-
-	case reflect.Map:
-		// nil maps should be indicated as different than empty maps
-		if v.IsNil() {
-			f.fs.Write(nilAngleBytes)
-			break
-		}
-
-		f.fs.Write(openMapBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			keys := v.MapKeys()
-			if f.cs.SortKeys {
-				sortValues(keys, f.cs)
-			}
-			for i, key := range keys {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				f.ignoreNextType = true
-				f.format(f.unpackValue(key))
-				f.fs.Write(colonBytes)
-				f.ignoreNextType = true
-				f.format(f.unpackValue(v.MapIndex(key)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeMapBytes)
-
-	case reflect.Struct:
-		numFields := v.NumField()
-		f.fs.Write(openBraceBytes)
-		f.depth++
-		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
-			f.fs.Write(maxShortBytes)
-		} else {
-			vt := v.Type()
-			for i := 0; i < numFields; i++ {
-				if i > 0 {
-					f.fs.Write(spaceBytes)
-				}
-				vtf := vt.Field(i)
-				if f.fs.Flag('+') || f.fs.Flag('#') {
-					f.fs.Write([]byte(vtf.Name))
-					f.fs.Write(colonBytes)
-				}
-				f.format(f.unpackValue(v.Field(i)))
-			}
-		}
-		f.depth--
-		f.fs.Write(closeBraceBytes)
-
-	case reflect.Uintptr:
-		printHexPtr(f.fs, uintptr(v.Uint()))
-
-	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-		printHexPtr(f.fs, v.Pointer())
-
-	// There were not any other types at the time this code was written, but
-	// fall back to letting the default fmt package handle it if any get added.
-	default:
-		format := f.buildDefaultFormat()
-		if v.CanInterface() {
-			fmt.Fprintf(f.fs, format, v.Interface())
-		} else {
-			fmt.Fprintf(f.fs, format, v.String())
-		}
-	}
-}
-
-// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
-// details.
-func (f *formatState) Format(fs fmt.State, verb rune) {
-	f.fs = fs
-
-	// Use standard formatting for verbs that are not v.
-	if verb != 'v' {
-		format := f.constructOrigFormat(verb)
-		fmt.Fprintf(fs, format, f.value)
-		return
-	}
-
-	if f.value == nil {
-		if fs.Flag('#') {
-			fs.Write(interfaceBytes)
-		}
-		fs.Write(nilAngleBytes)
-		return
-	}
-
-	f.format(reflect.ValueOf(f.value))
-}
-
-// newFormatter is a helper function to consolidate the logic from the various
-// public methods which take varying config states.
-func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
-	fs := &formatState{value: v, cs: cs}
-	fs.pointers = make(map[uintptr]int)
-	return fs
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface.  As a result, it integrates cleanly with standard fmt package
-printing functions.  The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations.  Any other verbs such as %x and %q will be sent to the
-standard fmt package for formatting.  In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly.  It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-Printf, Println, or Fprintf.
-*/
-func NewFormatter(v interface{}) fmt.Formatter {
-	return newFormatter(&Config, v)
-}
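
format.go implements the fmt.Formatter returned by NewFormatter. A minimal sketch of feeding that formatter to the standard fmt package, as the doc comment above describes:

	package example

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func logValue(v interface{}) {
		// Wrapping the argument makes the %v, %+v, %#v and %#+v verbs
		// produce spew-style output; all other verbs fall through to fmt.
		f := spew.NewFormatter(v)
		fmt.Printf("compact: %v\nwith types: %#v\n", f, f)
	}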

+ 0 - 148
vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"fmt"
-	"io"
-)
-
-// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the formatted string as a value that satisfies error.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Errorf(format string, a ...interface{}) (err error) {
-	return fmt.Errorf(format, convertArgs(a)...)
-}
-
-// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprint(w, convertArgs(a)...)
-}
-
-// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
-	return fmt.Fprintf(w, format, convertArgs(a)...)
-}
-
-// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
-// passed with a default Formatter interface returned by NewFormatter.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
-func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
-	return fmt.Fprintln(w, convertArgs(a)...)
-}
-
-// Print is a wrapper for fmt.Print that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
-func Print(a ...interface{}) (n int, err error) {
-	return fmt.Print(convertArgs(a)...)
-}
-
-// Printf is a wrapper for fmt.Printf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Printf(format string, a ...interface{}) (n int, err error) {
-	return fmt.Printf(format, convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the number of bytes written and any write error encountered.  See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
-func Println(a ...interface{}) (n int, err error) {
-	return fmt.Println(convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprint(a ...interface{}) string {
-	return fmt.Sprint(convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a default Formatter interface returned by NewFormatter.  It
-// returns the resulting string.  See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
-func Sprintln(a ...interface{}) string {
-	return fmt.Sprintln(convertArgs(a)...)
-}
-
-// convertArgs accepts a slice of arguments and returns a slice of the same
-// length with each argument converted to a default spew Formatter interface.
-func convertArgs(args []interface{}) (formatters []interface{}) {
-	formatters = make([]interface{}, len(args))
-	for index, arg := range args {
-		formatters[index] = NewFormatter(arg)
-	}
-	return formatters
-}
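
spew.go wires the convenience wrappers (Printf, Sprintf, Errorf, and so on) to NewFormatter through convertArgs. A hedged usage sketch:

	package example

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func report(cfg interface{}, cause error) error {
		// Every argument is converted to a spew Formatter, so %#v shows
		// full type information for cfg.
		fmt.Println(spew.Sprintf("config: %#v", cfg))

		// Errorf behaves like fmt.Errorf with the same argument treatment.
		return spew.Errorf("load failed for %#v: %v", cfg, cause)
	}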

+ 0 - 13
vendor/github.com/fatedier/beego/LICENSE

@@ -1,13 +0,0 @@
-Copyright 2014 astaxie
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.

+ 0 - 63
vendor/github.com/fatedier/beego/logs/README.md

@@ -1,63 +0,0 @@
-## logs
-logs is a Go log manager. It supports multiple log adapters and is inspired by `database/sql`.
-
-
-## How to install?
-
-	go get github.com/astaxie/beego/logs
-
-
-## What adapters are supported?
-
-As of now it supports the console, file, smtp and conn adapters.
-
-
-## How to use it?
-
-First, import it:
-
-	import (
-		"github.com/astaxie/beego/logs"
-	)
-
-Then initialize a logger (the example below uses the console adapter):
-
-	log := NewLogger(10000)
-	log.SetLogger("console", "")	
-
-> the first parameter sets the buffer size of the log message channel
-
-Use it like this:	
-	
-	log.Trace("trace")
-	log.Info("info")
-	log.Warn("warning")
-	log.Debug("debug")
-	log.Critical("critical")
-
-
-## File adapter
-
-Configure file adapter like this:
-
-	log := NewLogger(10000)
-	log.SetLogger("file", `{"filename":"test.log"}`)
-
-
-## Conn adapter
-
-Configure like this:
-
-	log := NewLogger(1000)
-	log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
-	log.Info("info")
-
-
-## Smtp adapter
-
-Configure like this:
-
-	log := NewLogger(10000)
-	log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
-	log.Critical("sendmail critical")
-	time.Sleep(time.Second * 30)
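
The logs README above documents the adapter-based API of the vendored beego fork. A compact sketch combining the console and file adapters, assuming the upstream github.com/astaxie/beego/logs import path that the README itself uses (the filename and rotation values are only illustrative):

	package main

	import (
		"github.com/astaxie/beego/logs"
	)

	func main() {
		// 10000 is the buffer size of the asynchronous message channel.
		log := logs.NewLogger(10000)

		// Console adapter with default settings.
		log.SetLogger("console", "")

		// File adapter; keys follow the json config shown in the README.
		log.SetLogger("file", `{"filename":"test.log","daily":true,"maxdays":7,"rotate":true}`)

		log.Info("service started on %s", ":7000")
		log.Warn("this message goes to both adapters")
	}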

+ 0 - 28
vendor/github.com/fatedier/beego/logs/color.go

@@ -1,28 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package logs
-
-import "io"
-
-type ansiColorWriter struct {
-	w    io.Writer
-	mode outputMode
-}
-
-func (cw *ansiColorWriter) Write(p []byte) (int, error) {
-	return cw.w.Write(p)
-}

+ 0 - 428
vendor/github.com/fatedier/beego/logs/color_windows.go

@@ -1,428 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package logs
-
-import (
-	"bytes"
-	"io"
-	"strings"
-	"syscall"
-	"unsafe"
-)
-
-type (
-	csiState    int
-	parseResult int
-)
-
-const (
-	outsideCsiCode csiState = iota
-	firstCsiCode
-	secondCsiCode
-)
-
-const (
-	noConsole parseResult = iota
-	changedColor
-	unknown
-)
-
-type ansiColorWriter struct {
-	w             io.Writer
-	mode          outputMode
-	state         csiState
-	paramStartBuf bytes.Buffer
-	paramBuf      bytes.Buffer
-}
-
-const (
-	firstCsiChar   byte = '\x1b'
-	secondeCsiChar byte = '['
-	separatorChar  byte = ';'
-	sgrCode        byte = 'm'
-)
-
-const (
-	foregroundBlue      = uint16(0x0001)
-	foregroundGreen     = uint16(0x0002)
-	foregroundRed       = uint16(0x0004)
-	foregroundIntensity = uint16(0x0008)
-	backgroundBlue      = uint16(0x0010)
-	backgroundGreen     = uint16(0x0020)
-	backgroundRed       = uint16(0x0040)
-	backgroundIntensity = uint16(0x0080)
-	underscore          = uint16(0x8000)
-
-	foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
-	backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
-)
-
-const (
-	ansiReset        = "0"
-	ansiIntensityOn  = "1"
-	ansiIntensityOff = "21"
-	ansiUnderlineOn  = "4"
-	ansiUnderlineOff = "24"
-	ansiBlinkOn      = "5"
-	ansiBlinkOff     = "25"
-
-	ansiForegroundBlack   = "30"
-	ansiForegroundRed     = "31"
-	ansiForegroundGreen   = "32"
-	ansiForegroundYellow  = "33"
-	ansiForegroundBlue    = "34"
-	ansiForegroundMagenta = "35"
-	ansiForegroundCyan    = "36"
-	ansiForegroundWhite   = "37"
-	ansiForegroundDefault = "39"
-
-	ansiBackgroundBlack   = "40"
-	ansiBackgroundRed     = "41"
-	ansiBackgroundGreen   = "42"
-	ansiBackgroundYellow  = "43"
-	ansiBackgroundBlue    = "44"
-	ansiBackgroundMagenta = "45"
-	ansiBackgroundCyan    = "46"
-	ansiBackgroundWhite   = "47"
-	ansiBackgroundDefault = "49"
-
-	ansiLightForegroundGray    = "90"
-	ansiLightForegroundRed     = "91"
-	ansiLightForegroundGreen   = "92"
-	ansiLightForegroundYellow  = "93"
-	ansiLightForegroundBlue    = "94"
-	ansiLightForegroundMagenta = "95"
-	ansiLightForegroundCyan    = "96"
-	ansiLightForegroundWhite   = "97"
-
-	ansiLightBackgroundGray    = "100"
-	ansiLightBackgroundRed     = "101"
-	ansiLightBackgroundGreen   = "102"
-	ansiLightBackgroundYellow  = "103"
-	ansiLightBackgroundBlue    = "104"
-	ansiLightBackgroundMagenta = "105"
-	ansiLightBackgroundCyan    = "106"
-	ansiLightBackgroundWhite   = "107"
-)
-
-type drawType int
-
-const (
-	foreground drawType = iota
-	background
-)
-
-type winColor struct {
-	code     uint16
-	drawType drawType
-}
-
-var colorMap = map[string]winColor{
-	ansiForegroundBlack:   {0, foreground},
-	ansiForegroundRed:     {foregroundRed, foreground},
-	ansiForegroundGreen:   {foregroundGreen, foreground},
-	ansiForegroundYellow:  {foregroundRed | foregroundGreen, foreground},
-	ansiForegroundBlue:    {foregroundBlue, foreground},
-	ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
-	ansiForegroundCyan:    {foregroundGreen | foregroundBlue, foreground},
-	ansiForegroundWhite:   {foregroundRed | foregroundGreen | foregroundBlue, foreground},
-	ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
-
-	ansiBackgroundBlack:   {0, background},
-	ansiBackgroundRed:     {backgroundRed, background},
-	ansiBackgroundGreen:   {backgroundGreen, background},
-	ansiBackgroundYellow:  {backgroundRed | backgroundGreen, background},
-	ansiBackgroundBlue:    {backgroundBlue, background},
-	ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
-	ansiBackgroundCyan:    {backgroundGreen | backgroundBlue, background},
-	ansiBackgroundWhite:   {backgroundRed | backgroundGreen | backgroundBlue, background},
-	ansiBackgroundDefault: {0, background},
-
-	ansiLightForegroundGray:    {foregroundIntensity, foreground},
-	ansiLightForegroundRed:     {foregroundIntensity | foregroundRed, foreground},
-	ansiLightForegroundGreen:   {foregroundIntensity | foregroundGreen, foreground},
-	ansiLightForegroundYellow:  {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
-	ansiLightForegroundBlue:    {foregroundIntensity | foregroundBlue, foreground},
-	ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
-	ansiLightForegroundCyan:    {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
-	ansiLightForegroundWhite:   {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
-
-	ansiLightBackgroundGray:    {backgroundIntensity, background},
-	ansiLightBackgroundRed:     {backgroundIntensity | backgroundRed, background},
-	ansiLightBackgroundGreen:   {backgroundIntensity | backgroundGreen, background},
-	ansiLightBackgroundYellow:  {backgroundIntensity | backgroundRed | backgroundGreen, background},
-	ansiLightBackgroundBlue:    {backgroundIntensity | backgroundBlue, background},
-	ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
-	ansiLightBackgroundCyan:    {backgroundIntensity | backgroundGreen | backgroundBlue, background},
-	ansiLightBackgroundWhite:   {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
-}
-
-var (
-	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
-	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
-	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-	defaultAttr                    *textAttributes
-)
-
-func init() {
-	screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
-	if screenInfo != nil {
-		colorMap[ansiForegroundDefault] = winColor{
-			screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
-			foreground,
-		}
-		colorMap[ansiBackgroundDefault] = winColor{
-			screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
-			background,
-		}
-		defaultAttr = convertTextAttr(screenInfo.WAttributes)
-	}
-}
-
-type coord struct {
-	X, Y int16
-}
-
-type smallRect struct {
-	Left, Top, Right, Bottom int16
-}
-
-type consoleScreenBufferInfo struct {
-	DwSize              coord
-	DwCursorPosition    coord
-	WAttributes         uint16
-	SrWindow            smallRect
-	DwMaximumWindowSize coord
-}
-
-func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
-	var csbi consoleScreenBufferInfo
-	ret, _, _ := procGetConsoleScreenBufferInfo.Call(
-		hConsoleOutput,
-		uintptr(unsafe.Pointer(&csbi)))
-	if ret == 0 {
-		return nil
-	}
-	return &csbi
-}
-
-func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
-	ret, _, _ := procSetConsoleTextAttribute.Call(
-		hConsoleOutput,
-		uintptr(wAttributes))
-	return ret != 0
-}
-
-type textAttributes struct {
-	foregroundColor     uint16
-	backgroundColor     uint16
-	foregroundIntensity uint16
-	backgroundIntensity uint16
-	underscore          uint16
-	otherAttributes     uint16
-}
-
-func convertTextAttr(winAttr uint16) *textAttributes {
-	fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
-	bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
-	fgIntensity := winAttr & foregroundIntensity
-	bgIntensity := winAttr & backgroundIntensity
-	underline := winAttr & underscore
-	otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
-	return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
-}
-
-func convertWinAttr(textAttr *textAttributes) uint16 {
-	var winAttr uint16
-	winAttr |= textAttr.foregroundColor
-	winAttr |= textAttr.backgroundColor
-	winAttr |= textAttr.foregroundIntensity
-	winAttr |= textAttr.backgroundIntensity
-	winAttr |= textAttr.underscore
-	winAttr |= textAttr.otherAttributes
-	return winAttr
-}
-
-func changeColor(param []byte) parseResult {
-	screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
-	if screenInfo == nil {
-		return noConsole
-	}
-
-	winAttr := convertTextAttr(screenInfo.WAttributes)
-	strParam := string(param)
-	if len(strParam) <= 0 {
-		strParam = "0"
-	}
-	csiParam := strings.Split(strParam, string(separatorChar))
-	for _, p := range csiParam {
-		c, ok := colorMap[p]
-		switch {
-		case !ok:
-			switch p {
-			case ansiReset:
-				winAttr.foregroundColor = defaultAttr.foregroundColor
-				winAttr.backgroundColor = defaultAttr.backgroundColor
-				winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
-				winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
-				winAttr.underscore = 0
-				winAttr.otherAttributes = 0
-			case ansiIntensityOn:
-				winAttr.foregroundIntensity = foregroundIntensity
-			case ansiIntensityOff:
-				winAttr.foregroundIntensity = 0
-			case ansiUnderlineOn:
-				winAttr.underscore = underscore
-			case ansiUnderlineOff:
-				winAttr.underscore = 0
-			case ansiBlinkOn:
-				winAttr.backgroundIntensity = backgroundIntensity
-			case ansiBlinkOff:
-				winAttr.backgroundIntensity = 0
-			default:
-				// unknown code
-			}
-		case c.drawType == foreground:
-			winAttr.foregroundColor = c.code
-		case c.drawType == background:
-			winAttr.backgroundColor = c.code
-		}
-	}
-	winTextAttribute := convertWinAttr(winAttr)
-	setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
-
-	return changedColor
-}
-
-func parseEscapeSequence(command byte, param []byte) parseResult {
-	if defaultAttr == nil {
-		return noConsole
-	}
-
-	switch command {
-	case sgrCode:
-		return changeColor(param)
-	default:
-		return unknown
-	}
-}
-
-func (cw *ansiColorWriter) flushBuffer() (int, error) {
-	return cw.flushTo(cw.w)
-}
-
-func (cw *ansiColorWriter) resetBuffer() (int, error) {
-	return cw.flushTo(nil)
-}
-
-func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) {
-	var n1, n2 int
-	var err error
-
-	startBytes := cw.paramStartBuf.Bytes()
-	cw.paramStartBuf.Reset()
-	if w != nil {
-		n1, err = cw.w.Write(startBytes)
-		if err != nil {
-			return n1, err
-		}
-	} else {
-		n1 = len(startBytes)
-	}
-	paramBytes := cw.paramBuf.Bytes()
-	cw.paramBuf.Reset()
-	if w != nil {
-		n2, err = cw.w.Write(paramBytes)
-		if err != nil {
-			return n1 + n2, err
-		}
-	} else {
-		n2 = len(paramBytes)
-	}
-	return n1 + n2, nil
-}
-
-func isParameterChar(b byte) bool {
-	return ('0' <= b && b <= '9') || b == separatorChar
-}
-
-func (cw *ansiColorWriter) Write(p []byte) (int, error) {
-	r, nw, first, last := 0, 0, 0, 0
-	if cw.mode != DiscardNonColorEscSeq {
-		cw.state = outsideCsiCode
-		cw.resetBuffer()
-	}
-
-	var err error
-	for i, ch := range p {
-		switch cw.state {
-		case outsideCsiCode:
-			if ch == firstCsiChar {
-				cw.paramStartBuf.WriteByte(ch)
-				cw.state = firstCsiCode
-			}
-		case firstCsiCode:
-			switch ch {
-			case firstCsiChar:
-				cw.paramStartBuf.WriteByte(ch)
-				break
-			case secondeCsiChar:
-				cw.paramStartBuf.WriteByte(ch)
-				cw.state = secondCsiCode
-				last = i - 1
-			default:
-				cw.resetBuffer()
-				cw.state = outsideCsiCode
-			}
-		case secondCsiCode:
-			if isParameterChar(ch) {
-				cw.paramBuf.WriteByte(ch)
-			} else {
-				nw, err = cw.w.Write(p[first:last])
-				r += nw
-				if err != nil {
-					return r, err
-				}
-				first = i + 1
-				result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
-				if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) {
-					cw.paramBuf.WriteByte(ch)
-					nw, err := cw.flushBuffer()
-					if err != nil {
-						return r, err
-					}
-					r += nw
-				} else {
-					n, _ := cw.resetBuffer()
-					// Add one more to the size of the buffer for the last ch
-					r += n + 1
-				}
-
-				cw.state = outsideCsiCode
-			}
-		default:
-			cw.state = outsideCsiCode
-		}
-	}
-
-	if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode {
-		nw, err = cw.w.Write(p[first:])
-		r += nw
-	}
-
-	return r, err
-}

+ 0 - 117
vendor/github.com/fatedier/beego/logs/conn.go

@@ -1,117 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
-	"encoding/json"
-	"io"
-	"net"
-	"time"
-)
-
-// connWriter implements LoggerInterface.
-// it writes messages over a keep-alive tcp connection.
-type connWriter struct {
-	lg             *logWriter
-	innerWriter    io.WriteCloser
-	ReconnectOnMsg bool   `json:"reconnectOnMsg"`
-	Reconnect      bool   `json:"reconnect"`
-	Net            string `json:"net"`
-	Addr           string `json:"addr"`
-	Level          int    `json:"level"`
-}
-
-// NewConn create new ConnWrite returning as LoggerInterface.
-func NewConn() Logger {
-	conn := new(connWriter)
-	conn.Level = LevelTrace
-	return conn
-}
-
-// Init init connection writer with json config.
-// json config only need key "level".
-func (c *connWriter) Init(jsonConfig string) error {
-	return json.Unmarshal([]byte(jsonConfig), c)
-}
-
-// WriteMsg write message in connection.
-// if connection is down, try to re-connect.
-func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error {
-	if level > c.Level {
-		return nil
-	}
-	if c.needToConnectOnMsg() {
-		err := c.connect()
-		if err != nil {
-			return err
-		}
-	}
-
-	if c.ReconnectOnMsg {
-		defer c.innerWriter.Close()
-	}
-
-	c.lg.println(when, msg)
-	return nil
-}
-
-// Flush implementing method. empty.
-func (c *connWriter) Flush() {
-
-}
-
-// Destroy destroy connection writer and close tcp listener.
-func (c *connWriter) Destroy() {
-	if c.innerWriter != nil {
-		c.innerWriter.Close()
-	}
-}
-
-func (c *connWriter) connect() error {
-	if c.innerWriter != nil {
-		c.innerWriter.Close()
-		c.innerWriter = nil
-	}
-
-	conn, err := net.Dial(c.Net, c.Addr)
-	if err != nil {
-		return err
-	}
-
-	if tcpConn, ok := conn.(*net.TCPConn); ok {
-		tcpConn.SetKeepAlive(true)
-	}
-
-	c.innerWriter = conn
-	c.lg = newLogWriter(conn)
-	return nil
-}
-
-func (c *connWriter) needToConnectOnMsg() bool {
-	if c.Reconnect {
-		c.Reconnect = false
-		return true
-	}
-
-	if c.innerWriter == nil {
-		return true
-	}
-
-	return c.ReconnectOnMsg
-}
-
-func init() {
-	Register(AdapterConn, NewConn)
-}
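
conn.go implements the "conn" adapter configured in the README with a json blob. Based on the connWriter struct tags above, a hedged configuration sketch (the address is only a placeholder):

	package main

	import "github.com/astaxie/beego/logs"

	func main() {
		log := logs.NewLogger(1000)

		// Keys mirror the connWriter json tags: net, addr, level,
		// reconnect and reconnectOnMsg.
		log.SetLogger("conn", `{"net":"tcp","addr":"127.0.0.1:7020","reconnect":true,"level":7}`)

		log.Info("forwarded over the keep-alive tcp connection")
	}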

+ 0 - 102
vendor/github.com/fatedier/beego/logs/console.go

@@ -1,102 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
-	"encoding/json"
-	"os"
-	"runtime"
-	"time"
-)
-
-// brush is a color join function
-type brush func(string) string
-
-// newBrush return a fix color Brush
-func newBrush(color string) brush {
-	pre := "\033["
-	reset := "\033[0m"
-	return func(text string) string {
-		return pre + color + "m" + text + reset
-	}
-}
-
-var colors = []brush{
-	newBrush("1;37"), // Emergency          white
-	newBrush("1;36"), // Alert              cyan
-	newBrush("1;35"), // Critical           magenta
-	newBrush("1;31"), // Error              red
-	newBrush("1;33"), // Warning            yellow
-	newBrush("1;32"), // Notice             green
-	newBrush("1;34"), // Informational      blue
-	newBrush("1;34"), // Debug              blue
-	newBrush("1;34"), // Trace              blue
-}
-
-// consoleWriter implements LoggerInterface and writes messages to terminal.
-type consoleWriter struct {
-	lg       *logWriter
-	Level    int  `json:"level"`
-	Colorful bool `json:"color"` //this field is useful only when system's terminal supports color
-}
-
-// NewConsole create ConsoleWriter returning as LoggerInterface.
-func NewConsole() Logger {
-	cw := &consoleWriter{
-		lg:       newLogWriter(os.Stdout),
-		Level:    LevelTrace,
-		Colorful: runtime.GOOS != "windows",
-	}
-	return cw
-}
-
-// Init init console logger.
-// jsonConfig like '{"level":LevelTrace}'.
-func (c *consoleWriter) Init(jsonConfig string) error {
-	if len(jsonConfig) == 0 {
-		return nil
-	}
-	err := json.Unmarshal([]byte(jsonConfig), c)
-	if runtime.GOOS == "windows" {
-		c.Colorful = false
-	}
-	return err
-}
-
-// WriteMsg write message in console.
-func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error {
-	if level > c.Level {
-		return nil
-	}
-	if c.Colorful {
-		msg = colors[level](msg)
-	}
-	c.lg.println(when, msg)
-	return nil
-}
-
-// Destroy implementing method. empty.
-func (c *consoleWriter) Destroy() {
-
-}
-
-// Flush implementing method. empty.
-func (c *consoleWriter) Flush() {
-
-}
-
-func init() {
-	Register(AdapterConsole, NewConsole)
-}
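
console.go shows that the console adapter also accepts a json config with "level" and "color" keys. A brief sketch of filtering by level:

	package main

	import "github.com/astaxie/beego/logs"

	func main() {
		log := logs.NewLogger(10000)

		// Keep only Error (3) and above, and disable ANSI colors.
		log.SetLogger("console", `{"level":3,"color":false}`)

		log.Error("printed")
		log.Debug("filtered out, since Debug is above the configured level")
	}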

+ 0 - 327
vendor/github.com/fatedier/beego/logs/file.go

@@ -1,327 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-// fileLogWriter implements LoggerInterface.
-// It writes messages by lines limit, file size limit, or time frequency.
-type fileLogWriter struct {
-	sync.RWMutex // serializes writes and guards maxLinesCurLines and maxSizeCurSize
-	// The opened file
-	Filename   string `json:"filename"`
-	fileWriter *os.File
-
-	// Rotate at line
-	MaxLines         int `json:"maxlines"`
-	maxLinesCurLines int
-
-	// Rotate at size
-	MaxSize        int `json:"maxsize"`
-	maxSizeCurSize int
-
-	// Rotate daily
-	Daily         bool  `json:"daily"`
-	MaxDays       int64 `json:"maxdays"`
-	dailyOpenDate int
-	dailyOpenTime time.Time
-
-	Rotate bool `json:"rotate"`
-
-	Level int `json:"level"`
-
-	Perm string `json:"perm"`
-
-	fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
-}
-
-// newFileWriter creates a fileLogWriter and returns it as a Logger.
-func newFileWriter() Logger {
-	w := &fileLogWriter{
-		Daily:   true,
-		MaxDays: 7,
-		Rotate:  true,
-		Level:   LevelTrace,
-		Perm:    "0660",
-	}
-	return w
-}
-
-// Init file logger with json config.
-// jsonConfig like:
-//	{
-//	"filename":"logs/beego.log",
-//	"maxLines":10000,
-//	"maxsize":1024,
-//	"daily":true,
-//	"maxDays":15,
-//	"rotate":true,
-//  	"perm":"0600"
-//	}
-func (w *fileLogWriter) Init(jsonConfig string) error {
-	err := json.Unmarshal([]byte(jsonConfig), w)
-	if err != nil {
-		return err
-	}
-	if len(w.Filename) == 0 {
-		return errors.New("jsonconfig must have filename")
-	}
-	w.suffix = filepath.Ext(w.Filename)
-	w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix)
-	if w.suffix == "" {
-		w.suffix = ".log"
-	}
-	err = w.startLogger()
-	return err
-}
-
-// startLogger creates the log file and installs it as the current file writer.
-func (w *fileLogWriter) startLogger() error {
-	file, err := w.createLogFile()
-	if err != nil {
-		return err
-	}
-	if w.fileWriter != nil {
-		w.fileWriter.Close()
-	}
-	w.fileWriter = file
-	return w.initFd()
-}
-
-func (w *fileLogWriter) needRotate(size int, day int) bool {
-	return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
-		(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
-		(w.Daily && day != w.dailyOpenDate)
-
-}
-
-// WriteMsg writes a log message to the file.
-func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
-	if level > w.Level {
-		return nil
-	}
-	h, d := formatTimeHeader(when)
-	msg = string(h) + msg + "\n"
-	if w.Rotate {
-		w.RLock()
-		if w.needRotate(len(msg), d) {
-			w.RUnlock()
-			w.Lock()
-			if w.needRotate(len(msg), d) {
-				if err := w.doRotate(when); err != nil {
-					fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
-				}
-			}
-			w.Unlock()
-		} else {
-			w.RUnlock()
-		}
-	}
-
-	w.Lock()
-	_, err := w.fileWriter.Write([]byte(msg))
-	if err == nil {
-		w.maxLinesCurLines++
-		w.maxSizeCurSize += len(msg)
-	}
-	w.Unlock()
-	return err
-}
-
-func (w *fileLogWriter) createLogFile() (*os.File, error) {
-	// Open the log file
-	perm, err := strconv.ParseInt(w.Perm, 8, 64)
-	if err != nil {
-		return nil, err
-	}
-	fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm))
-	if err == nil {
-		// Ensure the file permission matches the user-set perm, because os.OpenFile obeys the umask
-		os.Chmod(w.Filename, os.FileMode(perm))
-	}
-	return fd, err
-}
-
-func (w *fileLogWriter) initFd() error {
-	fd := w.fileWriter
-	fInfo, err := fd.Stat()
-	if err != nil {
-		return fmt.Errorf("get stat err: %s\n", err)
-	}
-	w.maxSizeCurSize = int(fInfo.Size())
-	w.dailyOpenTime = time.Now()
-	w.dailyOpenDate = w.dailyOpenTime.Day()
-	w.maxLinesCurLines = 0
-	if w.Daily {
-		go w.dailyRotate(w.dailyOpenTime)
-	}
-	if fInfo.Size() > 0 {
-		count, err := w.lines()
-		if err != nil {
-			return err
-		}
-		w.maxLinesCurLines = count
-	}
-	return nil
-}
-
-func (w *fileLogWriter) dailyRotate(openTime time.Time) {
-	y, m, d := openTime.Add(24 * time.Hour).Date()
-	nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location())
-	tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
-	select {
-	case <-tm.C:
-		w.Lock()
-		if w.needRotate(0, time.Now().Day()) {
-			if err := w.doRotate(time.Now()); err != nil {
-				fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
-			}
-		}
-		w.Unlock()
-	}
-}
-
-func (w *fileLogWriter) lines() (int, error) {
-	fd, err := os.Open(w.Filename)
-	if err != nil {
-		return 0, err
-	}
-	defer fd.Close()
-
-	buf := make([]byte, 32768) // 32k
-	count := 0
-	lineSep := []byte{'\n'}
-
-	for {
-		c, err := fd.Read(buf)
-		if err != nil && err != io.EOF {
-			return count, err
-		}
-
-		count += bytes.Count(buf[:c], lineSep)
-
-		if err == io.EOF {
-			break
-		}
-	}
-
-	return count, nil
-}
-
-// doRotate rotates the current log into a new file.
-// The new file name looks like xx.2013-01-01.log (daily) or xx.001.log (by line or size).
-func (w *fileLogWriter) doRotate(logTime time.Time) error {
-	// file exists
-	// Find the next available number
-	num := 1
-	fName := ""
-
-	_, err := os.Lstat(w.Filename)
-	if err != nil {
-		// even if the file does not exist (or another error occurred), we must RESTART the logger
-		goto RESTART_LOGGER
-	}
-
-	if w.MaxLines > 0 || w.MaxSize > 0 {
-		for ; err == nil && num <= 999; num++ {
-			fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format("2006-01-02"), num, w.suffix)
-			_, err = os.Lstat(fName)
-		}
-	} else {
-		fName = fmt.Sprintf("%s.%s%s", w.fileNameOnly, w.dailyOpenTime.Format("2006-01-02"), w.suffix)
-		_, err = os.Lstat(fName)
-		for ; err == nil && num <= 999; num++ {
-			fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", w.dailyOpenTime.Format("2006-01-02"), num, w.suffix)
-			_, err = os.Lstat(fName)
-		}
-	}
-	// return error if the last file checked still existed
-	if err == nil {
-		return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename)
-	}
-
-	// close fileWriter before rename
-	w.fileWriter.Close()
-
-	// Rename the file to its newly found name.
-	// Even if an error occurs, we MUST guarantee that a new logger is restarted.
-	err = os.Rename(w.Filename, fName)
-	err = os.Chmod(fName, os.FileMode(0440))
-	// re-start logger
-RESTART_LOGGER:
-
-	startLoggerErr := w.startLogger()
-	go w.deleteOldLog()
-
-	if startLoggerErr != nil {
-		return fmt.Errorf("Rotate StartLogger: %s\n", startLoggerErr)
-	}
-	if err != nil {
-		return fmt.Errorf("Rotate: %s\n", err)
-	}
-	return nil
-
-}
-
-func (w *fileLogWriter) deleteOldLog() {
-	dir := filepath.Dir(w.Filename)
-	filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
-		defer func() {
-			if r := recover(); r != nil {
-				fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r)
-			}
-		}()
-
-		if info == nil {
-			return
-		}
-
-		if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) {
-			if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
-				strings.HasSuffix(filepath.Base(path), w.suffix) {
-				os.Remove(path)
-			}
-		}
-		return
-	})
-}
-
-// Destroy closes the file descriptor and the file writer.
-func (w *fileLogWriter) Destroy() {
-	w.fileWriter.Close()
-}
-
-// Flush flushes the file logger.
-// No messages are buffered in memory for the file logger,
-// so flushing means syncing the file to disk.
-func (w *fileLogWriter) Flush() {
-	w.fileWriter.Sync()
-}
-
-func init() {
-	Register(AdapterFile, newFileWriter)
-}
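
For reference, a minimal sketch of how the file adapter removed above could be configured; the JSON keys follow the fileLogWriter struct tags, while the file name, thresholds, and import alias are illustrative assumptions:

```go
package main

import (
	log "github.com/fatedier/beego/logs"
)

func main() {
	l := log.NewLogger()
	// Rotation triggers on line count, file size, or a day change.
	cfg := `{"filename":"frps.log","maxlines":100000,"maxsize":1048576,"daily":true,"maxdays":7,"rotate":true,"perm":"0660"}`
	if err := l.SetLogger(log.AdapterFile, cfg); err != nil {
		panic(err)
	}
	l.Warn("disk usage above threshold: %d%%", 91)
	l.Close() // flushes and closes the underlying *os.File
}
```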

+ 0 - 78
vendor/github.com/fatedier/beego/logs/jianliao.go

@@ -1,78 +0,0 @@
-package logs
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-	"time"
-)
-
-// JLWriter implements the beego Logger interface and sends messages to a Jianliao webhook
-type JLWriter struct {
-	AuthorName  string `json:"authorname"`
-	Title       string `json:"title"`
-	WebhookURL  string `json:"webhookurl"`
-	RedirectURL string `json:"redirecturl,omitempty"`
-	ImageURL    string `json:"imageurl,omitempty"`
-	Level       int    `json:"level"`
-}
-
-// newJLWriter creates a Jianliao writer.
-func newJLWriter() Logger {
-	return &JLWriter{Level: LevelTrace}
-}
-
-// Init JLWriter with json config string
-func (s *JLWriter) Init(jsonconfig string) error {
-	err := json.Unmarshal([]byte(jsonconfig), s)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// WriteMsg posts the formatted message to the configured Jianliao webhook.
-func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error {
-	if level > s.Level {
-		return nil
-	}
-
-	text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg)
-
-	form := url.Values{}
-	form.Add("authorName", s.AuthorName)
-	form.Add("title", s.Title)
-	form.Add("text", text)
-	if s.RedirectURL != "" {
-		form.Add("redirectUrl", s.RedirectURL)
-	}
-	if s.ImageURL != "" {
-		form.Add("imageUrl", s.ImageURL)
-	}
-
-	resp, err := http.PostForm(s.WebhookURL, form)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
-	}
-	return nil
-}
-
-// Flush implements the Logger interface; it is a no-op.
-func (s *JLWriter) Flush() {
-	return
-}
-
-// Destroy implements the Logger interface; it is a no-op.
-func (s *JLWriter) Destroy() {
-	return
-}
-
-func init() {
-	Register(AdapterJianLiao, newJLWriter)
-}
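
For reference, a minimal sketch of how the Jianliao webhook adapter removed above could be wired up; the JSON keys follow the JLWriter struct tags, while the author name, title, webhook URL, and level are placeholder assumptions:

```go
package main

import (
	log "github.com/fatedier/beego/logs"
)

func main() {
	l := log.NewLogger()
	// 3 == LevelError, so only Error and more severe levels are forwarded.
	cfg := `{"authorname":"frps","title":"alert","webhookurl":"https://example.com/hook","level":3}`
	_ = l.SetLogger(log.AdapterJianLiao, cfg)
	l.Error("proxy connection lost") // POSTed to the webhook as a form request
	l.Flush()
}
```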

+ 0 - 657
vendor/github.com/fatedier/beego/logs/log.go

@@ -1,657 +0,0 @@
-// Copyright 2012 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package logs provides a general log interface.
-// Usage:
-//
-// import "github.com/astaxie/beego/logs"
-//
-//	log := NewLogger(10000)
-//	log.SetLogger("console", "")
-//
-//	> the first parameter sets the size of the message channel
-//
-// Use it like this:
-//
-//	log.Trace("trace")
-//	log.Info("info")
-//	log.Warn("warning")
-//	log.Debug("debug")
-//	log.Critical("critical")
-//
-//  more docs http://beego.me/docs/module/logs.md
-package logs
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"path"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-)
-
-// RFC5424 log message levels.
-const (
-	LevelEmergency = iota
-	LevelAlert
-	LevelCritical
-	LevelError
-	LevelWarning
-	LevelNotice
-	LevelInformational
-	LevelDebug
-	LevelTrace
-)
-
-// levelLogLogger is defined to implement log.Logger
-// the real log level will be LevelEmergency
-const levelLoggerImpl = -1
-
-// Name for adapter with beego official support
-const (
-	AdapterConsole   = "console"
-	AdapterFile      = "file"
-	AdapterMultiFile = "multifile"
-	AdapterMail      = "smtp"
-	AdapterConn      = "conn"
-	AdapterEs        = "es"
-	AdapterJianLiao  = "jianliao"
-	AdapterSlack     = "slack"
-	AdapterAliLS     = "alils"
-)
-
-// Legacy log level constants to ensure backwards compatibility.
-const (
-	LevelInfo = LevelInformational
-	LevelWarn = LevelWarning
-)
-
-type newLoggerFunc func() Logger
-
-// Logger defines the behavior of a log provider.
-type Logger interface {
-	Init(config string) error
-	WriteMsg(when time.Time, msg string, level int) error
-	Destroy()
-	Flush()
-}
-
-var adapters = make(map[string]newLoggerFunc)
-var levelPrefix = [LevelTrace + 1]string{"[M] ", "[A] ", "[C] ", "[E] ", "[W] ", "[N] ", "[I] ", "[D] ", "[T] "}
-
-// Register makes a log provider available by the provided name.
-// If Register is called twice with the same name or if driver is nil,
-// it panics.
-func Register(name string, log newLoggerFunc) {
-	if log == nil {
-		panic("logs: Register provide is nil")
-	}
-	if _, dup := adapters[name]; dup {
-		panic("logs: Register called twice for provider " + name)
-	}
-	adapters[name] = log
-}
-
-// BeeLogger is default logger in beego application.
-// it can contain several providers and log message into all providers.
-type BeeLogger struct {
-	lock                sync.Mutex
-	level               int
-	init                bool
-	enableFuncCallDepth bool
-	loggerFuncCallDepth int
-	asynchronous        bool
-	msgChanLen          int64
-	msgChan             chan *logMsg
-	signalChan          chan string
-	wg                  sync.WaitGroup
-	outputs             []*nameLogger
-}
-
-const defaultAsyncMsgLen = 1e3
-
-type nameLogger struct {
-	Logger
-	name string
-}
-
-type logMsg struct {
-	level int
-	msg   string
-	when  time.Time
-}
-
-var logMsgPool *sync.Pool
-
-// NewLogger returns a new BeeLogger.
-// channelLen is the number of messages buffered in the channel (used when asynchronous is true).
-// If the buffering channel is full, logger adapters write to file or elsewhere.
-func NewLogger(channelLens ...int64) *BeeLogger {
-	bl := new(BeeLogger)
-	bl.level = LevelDebug
-	bl.loggerFuncCallDepth = 2
-	bl.msgChanLen = append(channelLens, 0)[0]
-	if bl.msgChanLen <= 0 {
-		bl.msgChanLen = defaultAsyncMsgLen
-	}
-	bl.signalChan = make(chan string, 1)
-	bl.setLogger(AdapterConsole)
-	return bl
-}
-
-// Async set the log to asynchronous and start the goroutine
-func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger {
-	bl.lock.Lock()
-	defer bl.lock.Unlock()
-	if bl.asynchronous {
-		return bl
-	}
-	bl.asynchronous = true
-	if len(msgLen) > 0 && msgLen[0] > 0 {
-		bl.msgChanLen = msgLen[0]
-	}
-	bl.msgChan = make(chan *logMsg, bl.msgChanLen)
-	logMsgPool = &sync.Pool{
-		New: func() interface{} {
-			return &logMsg{}
-		},
-	}
-	bl.wg.Add(1)
-	go bl.startLogger()
-	return bl
-}
-
-// setLogger adds the given logger adapter to BeeLogger with a config string.
-// config must be valid JSON, e.g. {"interval":360}.
-func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error {
-	config := append(configs, "{}")[0]
-	for _, l := range bl.outputs {
-		if l.name == adapterName {
-			return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName)
-		}
-	}
-
-	log, ok := adapters[adapterName]
-	if !ok {
-		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
-	}
-
-	lg := log()
-	err := lg.Init(config)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
-		return err
-	}
-	bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
-	return nil
-}
-
-// SetLogger adds the given logger adapter to BeeLogger with a config string.
-// config must be valid JSON, e.g. {"interval":360}.
-func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
-	bl.lock.Lock()
-	defer bl.lock.Unlock()
-	if !bl.init {
-		bl.outputs = []*nameLogger{}
-		bl.init = true
-	}
-	return bl.setLogger(adapterName, configs...)
-}
-
-// DelLogger removes a logger adapter from BeeLogger.
-func (bl *BeeLogger) DelLogger(adapterName string) error {
-	bl.lock.Lock()
-	defer bl.lock.Unlock()
-	outputs := []*nameLogger{}
-	for _, lg := range bl.outputs {
-		if lg.name == adapterName {
-			lg.Destroy()
-		} else {
-			outputs = append(outputs, lg)
-		}
-	}
-	if len(outputs) == len(bl.outputs) {
-		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
-	}
-	bl.outputs = outputs
-	return nil
-}
-
-func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) {
-	for _, l := range bl.outputs {
-		err := l.WriteMsg(when, msg, level)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
-		}
-	}
-}
-
-func (bl *BeeLogger) Write(p []byte) (n int, err error) {
-	if len(p) == 0 {
-		return 0, nil
-	}
-	// writeMsg will always add a '\n' character
-	if p[len(p)-1] == '\n' {
-		p = p[0 : len(p)-1]
-	}
-	// set levelLoggerImpl to ensure all log message will be write out
-	err = bl.writeMsg(levelLoggerImpl, string(p))
-	if err == nil {
-		return len(p), err
-	}
-	return 0, err
-}
-
-func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error {
-	if !bl.init {
-		bl.lock.Lock()
-		bl.setLogger(AdapterConsole)
-		bl.lock.Unlock()
-	}
-
-	if len(v) > 0 {
-		msg = fmt.Sprintf(msg, v...)
-	}
-	when := time.Now()
-	if bl.enableFuncCallDepth {
-		_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
-		if !ok {
-			file = "???"
-			line = 0
-		} else {
-			if strings.Contains(file, "<autogenerated>") {
-				_, file, line, ok = runtime.Caller(bl.loggerFuncCallDepth + 1)
-				if !ok {
-					file = "???"
-					line = 0
-				}
-			}
-		}
-		_, filename := path.Split(file)
-		msg = "[" + filename + ":" + strconv.FormatInt(int64(line), 10) + "] " + msg
-	}
-
-	//set level info in front of filename info
-	if logLevel == levelLoggerImpl {
-		// set to emergency to ensure all log will be print out correctly
-		logLevel = LevelEmergency
-	} else {
-		msg = levelPrefix[logLevel] + msg
-	}
-
-	if bl.asynchronous {
-		lm := logMsgPool.Get().(*logMsg)
-		lm.level = logLevel
-		lm.msg = msg
-		lm.when = when
-		bl.msgChan <- lm
-	} else {
-		bl.writeToLoggers(when, msg, logLevel)
-	}
-	return nil
-}
-
-// SetLevel Set log message level.
-// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
-// log providers will not even be sent the message.
-func (bl *BeeLogger) SetLevel(l int) {
-	bl.level = l
-}
-
-// SetLogFuncCallDepth set log funcCallDepth
-func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
-	bl.loggerFuncCallDepth = d
-}
-
-// GetLogFuncCallDepth return log funcCallDepth for wrapper
-func (bl *BeeLogger) GetLogFuncCallDepth() int {
-	return bl.loggerFuncCallDepth
-}
-
-// EnableFuncCallDepth enable log funcCallDepth
-func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
-	bl.enableFuncCallDepth = b
-}
-
-// start logger chan reading.
-// when chan is not empty, write logs.
-func (bl *BeeLogger) startLogger() {
-	gameOver := false
-	for {
-		select {
-		case bm := <-bl.msgChan:
-			bl.writeToLoggers(bm.when, bm.msg, bm.level)
-			logMsgPool.Put(bm)
-		case sg := <-bl.signalChan:
-			// Now should only send "flush" or "close" to bl.signalChan
-			bl.flush()
-			if sg == "close" {
-				for _, l := range bl.outputs {
-					l.Destroy()
-				}
-				bl.outputs = nil
-				gameOver = true
-			}
-			bl.wg.Done()
-		}
-		if gameOver {
-			break
-		}
-	}
-}
-
-// Emergency Log EMERGENCY level message.
-func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
-	if LevelEmergency > bl.level {
-		return
-	}
-	bl.writeMsg(LevelEmergency, format, v...)
-}
-
-// Alert Log ALERT level message.
-func (bl *BeeLogger) Alert(format string, v ...interface{}) {
-	if LevelAlert > bl.level {
-		return
-	}
-	bl.writeMsg(LevelAlert, format, v...)
-}
-
-// Critical Log CRITICAL level message.
-func (bl *BeeLogger) Critical(format string, v ...interface{}) {
-	if LevelCritical > bl.level {
-		return
-	}
-	bl.writeMsg(LevelCritical, format, v...)
-}
-
-// Error Log ERROR level message.
-func (bl *BeeLogger) Error(format string, v ...interface{}) {
-	if LevelError > bl.level {
-		return
-	}
-	bl.writeMsg(LevelError, format, v...)
-}
-
-// Warning Log WARNING level message.
-func (bl *BeeLogger) Warning(format string, v ...interface{}) {
-	if LevelWarn > bl.level {
-		return
-	}
-	bl.writeMsg(LevelWarn, format, v...)
-}
-
-// Notice Log NOTICE level message.
-func (bl *BeeLogger) Notice(format string, v ...interface{}) {
-	if LevelNotice > bl.level {
-		return
-	}
-	bl.writeMsg(LevelNotice, format, v...)
-}
-
-// Informational Log INFORMATIONAL level message.
-func (bl *BeeLogger) Informational(format string, v ...interface{}) {
-	if LevelInfo > bl.level {
-		return
-	}
-	bl.writeMsg(LevelInfo, format, v...)
-}
-
-// Debug Log DEBUG level message.
-func (bl *BeeLogger) Debug(format string, v ...interface{}) {
-	if LevelDebug > bl.level {
-		return
-	}
-	bl.writeMsg(LevelDebug, format, v...)
-}
-
-// Warn Log WARN level message.
-// compatibility alias for Warning()
-func (bl *BeeLogger) Warn(format string, v ...interface{}) {
-	if LevelWarn > bl.level {
-		return
-	}
-	bl.writeMsg(LevelWarn, format, v...)
-}
-
-// Info Log INFO level message.
-// compatibility alias for Informational()
-func (bl *BeeLogger) Info(format string, v ...interface{}) {
-	if LevelInfo > bl.level {
-		return
-	}
-	bl.writeMsg(LevelInfo, format, v...)
-}
-
-// Trace Log TRACE level message.
-// compatibility alias for Debug()
-func (bl *BeeLogger) Trace(format string, v ...interface{}) {
-	if LevelTrace > bl.level {
-		return
-	}
-	bl.writeMsg(LevelTrace, format, v...)
-}
-
-// Flush flush all chan data.
-func (bl *BeeLogger) Flush() {
-	if bl.asynchronous {
-		bl.signalChan <- "flush"
-		bl.wg.Wait()
-		bl.wg.Add(1)
-		return
-	}
-	bl.flush()
-}
-
-// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
-func (bl *BeeLogger) Close() {
-	if bl.asynchronous {
-		bl.signalChan <- "close"
-		bl.wg.Wait()
-		close(bl.msgChan)
-	} else {
-		bl.flush()
-		for _, l := range bl.outputs {
-			l.Destroy()
-		}
-		bl.outputs = nil
-	}
-	close(bl.signalChan)
-}
-
-// Reset close all outputs, and set bl.outputs to nil
-func (bl *BeeLogger) Reset() {
-	bl.Flush()
-	for _, l := range bl.outputs {
-		l.Destroy()
-	}
-	bl.outputs = nil
-}
-
-func (bl *BeeLogger) flush() {
-	if bl.asynchronous {
-		for {
-			if len(bl.msgChan) > 0 {
-				bm := <-bl.msgChan
-				bl.writeToLoggers(bm.when, bm.msg, bm.level)
-				logMsgPool.Put(bm)
-				continue
-			}
-			break
-		}
-	}
-	for _, l := range bl.outputs {
-		l.Flush()
-	}
-}
-
-// beeLogger references the used application logger.
-var beeLogger *BeeLogger = NewLogger()
-
-// GetLogger returns the default BeeLogger
-func GetBeeLogger() *BeeLogger {
-	return beeLogger
-}
-
-var beeLoggerMap = struct {
-	sync.RWMutex
-	logs map[string]*log.Logger
-}{
-	logs: map[string]*log.Logger{},
-}
-
-// GetLogger returns the default BeeLogger
-func GetLogger(prefixes ...string) *log.Logger {
-	prefix := append(prefixes, "")[0]
-	if prefix != "" {
-		prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
-	}
-	beeLoggerMap.RLock()
-	l, ok := beeLoggerMap.logs[prefix]
-	if ok {
-		beeLoggerMap.RUnlock()
-		return l
-	}
-	beeLoggerMap.RUnlock()
-	beeLoggerMap.Lock()
-	defer beeLoggerMap.Unlock()
-	l, ok = beeLoggerMap.logs[prefix]
-	if !ok {
-		l = log.New(beeLogger, prefix, 0)
-		beeLoggerMap.logs[prefix] = l
-	}
-	return l
-}
-
-// Reset will remove all the adapter
-func Reset() {
-	beeLogger.Reset()
-}
-
-func Async(msgLen ...int64) *BeeLogger {
-	return beeLogger.Async(msgLen...)
-}
-
-// SetLevel sets the global log level used by the simple logger.
-func SetLevel(l int) {
-	beeLogger.SetLevel(l)
-}
-
-// EnableFuncCallDepth enable log funcCallDepth
-func EnableFuncCallDepth(b bool) {
-	beeLogger.enableFuncCallDepth = b
-}
-
-// SetLogFuncCall set the CallDepth, default is 4
-func SetLogFuncCall(b bool) {
-	beeLogger.EnableFuncCallDepth(b)
-	beeLogger.SetLogFuncCallDepth(4)
-}
-
-// SetLogFuncCallDepth set log funcCallDepth
-func SetLogFuncCallDepth(d int) {
-	beeLogger.loggerFuncCallDepth = d
-}
-
-// SetLogger sets a new logger.
-func SetLogger(adapter string, config ...string) error {
-	err := beeLogger.SetLogger(adapter, config...)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Emergency logs a message at emergency level.
-func Emergency(f interface{}, v ...interface{}) {
-	beeLogger.Emergency(formatLog(f, v...))
-}
-
-// Alert logs a message at alert level.
-func Alert(f interface{}, v ...interface{}) {
-	beeLogger.Alert(formatLog(f, v...))
-}
-
-// Critical logs a message at critical level.
-func Critical(f interface{}, v ...interface{}) {
-	beeLogger.Critical(formatLog(f, v...))
-}
-
-// Error logs a message at error level.
-func Error(f interface{}, v ...interface{}) {
-	beeLogger.Error(formatLog(f, v...))
-}
-
-// Warning logs a message at warning level.
-func Warning(f interface{}, v ...interface{}) {
-	beeLogger.Warn(formatLog(f, v...))
-}
-
-// Warn compatibility alias for Warning()
-func Warn(f interface{}, v ...interface{}) {
-	beeLogger.Warn(formatLog(f, v...))
-}
-
-// Notice logs a message at notice level.
-func Notice(f interface{}, v ...interface{}) {
-	beeLogger.Notice(formatLog(f, v...))
-}
-
-// Informational logs a message at info level.
-func Informational(f interface{}, v ...interface{}) {
-	beeLogger.Info(formatLog(f, v...))
-}
-
-// Info is a compatibility alias for Informational().
-func Info(f interface{}, v ...interface{}) {
-	beeLogger.Info(formatLog(f, v...))
-}
-
-// Debug logs a message at debug level.
-func Debug(f interface{}, v ...interface{}) {
-	beeLogger.Debug(formatLog(f, v...))
-}
-
-// Trace logs a message at trace level.
-// compatibility alias for Debug()
-func Trace(f interface{}, v ...interface{}) {
-	beeLogger.Trace(formatLog(f, v...))
-}
-
-func formatLog(f interface{}, v ...interface{}) string {
-	var msg string
-	switch f.(type) {
-	case string:
-		msg = f.(string)
-		if len(v) == 0 {
-			return msg
-		}
-		if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") {
-			//format string
-		} else {
-			//do not contain format char
-			msg += strings.Repeat(" %v", len(v))
-		}
-	default:
-		msg = fmt.Sprint(f)
-		if len(v) == 0 {
-			return msg
-		}
-		msg += strings.Repeat(" %v", len(v))
-	}
-	return fmt.Sprintf(msg, v...)
-}
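
For reference, the file above also removes the Logger interface and adapter registry used by every writer in this package. Below is a minimal, hypothetical sketch of a custom adapter built against that interface; the adapter name "stdout" and the stdoutWriter type are inventions for illustration only:

```go
package main

import (
	"fmt"
	"time"

	log "github.com/fatedier/beego/logs"
)

// stdoutWriter satisfies the Logger interface removed above (Init/WriteMsg/Destroy/Flush).
type stdoutWriter struct{}

func (w *stdoutWriter) Init(jsonConfig string) error { return nil }
func (w *stdoutWriter) WriteMsg(when time.Time, msg string, level int) error {
	fmt.Printf("%s %s\n", when.Format("2006/01/02 15:04:05"), msg)
	return nil
}
func (w *stdoutWriter) Destroy() {}
func (w *stdoutWriter) Flush()   {}

func main() {
	// Register the adapter under an arbitrary name, then select it.
	log.Register("stdout", func() log.Logger { return &stdoutWriter{} })
	l := log.NewLogger()
	_ = l.SetLogger("stdout")
	l.Notice("custom adapter wired in")
	l.Close()
}
```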

+ 0 - 188
vendor/github.com/fatedier/beego/logs/logger.go

@@ -1,188 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"sync"
-	"time"
-)
-
-type logWriter struct {
-	sync.Mutex
-	writer io.Writer
-}
-
-func newLogWriter(wr io.Writer) *logWriter {
-	return &logWriter{writer: wr}
-}
-
-func (lg *logWriter) println(when time.Time, msg string) {
-	lg.Lock()
-	h, _ := formatTimeHeader(when)
-	lg.writer.Write(append(append(h, msg...), '\n'))
-	lg.Unlock()
-}
-
-type outputMode int
-
-// DiscardNonColorEscSeq supports split color escape sequences,
-// but non-color escape sequences are not output.
-// Use OutputNonColorEscSeq if you want to output non-color escape
-// sequences (such as those used by ncurses); however, it does not
-// support split color escape sequences.
-const (
-	_ outputMode = iota
-	DiscardNonColorEscSeq
-	OutputNonColorEscSeq
-)
-
-// NewAnsiColorWriter creates and initializes a new ansiColorWriter
-// that wraps the given io.Writer w.
-// On Windows consoles it changes the foreground and background colors
-// of the text according to the escape sequences it encounters;
-// on other systems it writes all text to w unchanged.
-func NewAnsiColorWriter(w io.Writer) io.Writer {
-	return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq)
-}
-
-// NewModeAnsiColorWriter creates and initializes a new ansiColorWriter
-// with the specified outputMode.
-func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
-	if _, ok := w.(*ansiColorWriter); !ok {
-		return &ansiColorWriter{
-			w:    w,
-			mode: mode,
-		}
-	}
-	return w
-}
-
-const (
-	y1  = `0123456789`
-	y2  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
-	y3  = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
-	y4  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
-	mo1 = `000000000111`
-	mo2 = `123456789012`
-	d1  = `0000000001111111111222222222233`
-	d2  = `1234567890123456789012345678901`
-	h1  = `000000000011111111112222`
-	h2  = `012345678901234567890123`
-	mi1 = `000000000011111111112222222222333333333344444444445555555555`
-	mi2 = `012345678901234567890123456789012345678901234567890123456789`
-	s1  = `000000000011111111112222222222333333333344444444445555555555`
-	s2  = `012345678901234567890123456789012345678901234567890123456789`
-)
-
-func formatTimeHeader(when time.Time) ([]byte, int) {
-	y, mo, d := when.Date()
-	h, mi, s := when.Clock()
-	//len("2006/01/02 15:04:05 ")==20
-	var buf [20]byte
-
-	buf[0] = y1[y/1000%10]
-	buf[1] = y2[y/100]
-	buf[2] = y3[y-y/100*100]
-	buf[3] = y4[y-y/100*100]
-	buf[4] = '/'
-	buf[5] = mo1[mo-1]
-	buf[6] = mo2[mo-1]
-	buf[7] = '/'
-	buf[8] = d1[d-1]
-	buf[9] = d2[d-1]
-	buf[10] = ' '
-	buf[11] = h1[h]
-	buf[12] = h2[h]
-	buf[13] = ':'
-	buf[14] = mi1[mi]
-	buf[15] = mi2[mi]
-	buf[16] = ':'
-	buf[17] = s1[s]
-	buf[18] = s2[s]
-	buf[19] = ' '
-
-	return buf[0:], d
-}
-
-var (
-	green   = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
-	white   = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
-	yellow  = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
-	red     = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
-	blue    = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
-	magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
-	cyan    = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
-
-	w32Green   = string([]byte{27, 91, 52, 50, 109})
-	w32White   = string([]byte{27, 91, 52, 55, 109})
-	w32Yellow  = string([]byte{27, 91, 52, 51, 109})
-	w32Red     = string([]byte{27, 91, 52, 49, 109})
-	w32Blue    = string([]byte{27, 91, 52, 52, 109})
-	w32Magenta = string([]byte{27, 91, 52, 53, 109})
-	w32Cyan    = string([]byte{27, 91, 52, 54, 109})
-
-	reset = string([]byte{27, 91, 48, 109})
-)
-
-func ColorByStatus(cond bool, code int) string {
-	switch {
-	case code >= 200 && code < 300:
-		return map[bool]string{true: green, false: w32Green}[cond]
-	case code >= 300 && code < 400:
-		return map[bool]string{true: white, false: w32White}[cond]
-	case code >= 400 && code < 500:
-		return map[bool]string{true: yellow, false: w32Yellow}[cond]
-	default:
-		return map[bool]string{true: red, false: w32Red}[cond]
-	}
-}
-
-func ColorByMethod(cond bool, method string) string {
-	switch method {
-	case "GET":
-		return map[bool]string{true: blue, false: w32Blue}[cond]
-	case "POST":
-		return map[bool]string{true: cyan, false: w32Cyan}[cond]
-	case "PUT":
-		return map[bool]string{true: yellow, false: w32Yellow}[cond]
-	case "DELETE":
-		return map[bool]string{true: red, false: w32Red}[cond]
-	case "PATCH":
-		return map[bool]string{true: green, false: w32Green}[cond]
-	case "HEAD":
-		return map[bool]string{true: magenta, false: w32Magenta}[cond]
-	case "OPTIONS":
-		return map[bool]string{true: white, false: w32White}[cond]
-	default:
-		return reset
-	}
-}
-
-// Guard Mutex to guarantee atomicity of W32Debug(string) function
-var mu sync.Mutex
-
-// Helper method to output colored logs in Windows terminals
-func W32Debug(msg string) {
-	mu.Lock()
-	defer mu.Unlock()
-
-	current := time.Now()
-	w := NewAnsiColorWriter(os.Stdout)
-
-	fmt.Fprintf(w, "[beego] %v %s\n", current.Format("2006/01/02 - 15:04:05"), msg)
-}
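
For reference, the file above also removes the exported color helpers. A minimal, hypothetical sketch of how they could be used to colorize an access-log line; the request path, method, and status are placeholders:

```go
package main

import (
	"fmt"
	"runtime"

	log "github.com/fatedier/beego/logs"
)

func main() {
	// true selects the plain ANSI variants; false selects the w32 variants
	// intended to be filtered through NewAnsiColorWriter on Windows.
	ansi := runtime.GOOS != "windows"
	reset := "\x1b[0m"
	fmt.Printf("%sGET%s %s200%s /api/status\n",
		log.ColorByMethod(ansi, "GET"), reset,
		log.ColorByStatus(ansi, 200), reset)
}
```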

+ 0 - 116
vendor/github.com/fatedier/beego/logs/multifile.go

@@ -1,116 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
-	"encoding/json"
-	"time"
-)
-
-// A multiFileLogWriter manages several fileLogWriters.
-// It writes all logs to the file named in the JSON configuration and also writes each level's logs to a corresponding per-level file:
-// if the configured file name is project.log, it creates project.error.log and project.debug.log,
-// writing error-level logs to project.error.log and debug-level logs to project.debug.log.
-// The rotate attribute behaves the same as in fileLogWriter.
-type multiFileLogWriter struct {
-	writers       [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
-	fullLogWriter *fileLogWriter
-	Separate      []string `json:"separate"`
-}
-
-var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
-
-// Init file logger with json config.
-// jsonConfig like:
-//	{
-//	"filename":"logs/beego.log",
-//	"maxLines":0,
-//	"maxsize":0,
-//	"daily":true,
-//	"maxDays":15,
-//	"rotate":true,
-//  	"perm":0600,
-//	"separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
-//	}
-
-func (f *multiFileLogWriter) Init(config string) error {
-	writer := newFileWriter().(*fileLogWriter)
-	err := writer.Init(config)
-	if err != nil {
-		return err
-	}
-	f.fullLogWriter = writer
-	f.writers[LevelDebug+1] = writer
-
-	//unmarshal "separate" field to f.Separate
-	json.Unmarshal([]byte(config), f)
-
-	jsonMap := map[string]interface{}{}
-	json.Unmarshal([]byte(config), &jsonMap)
-
-	for i := LevelEmergency; i < LevelDebug+1; i++ {
-		for _, v := range f.Separate {
-			if v == levelNames[i] {
-				jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
-				jsonMap["level"] = i
-				bs, _ := json.Marshal(jsonMap)
-				writer = newFileWriter().(*fileLogWriter)
-				writer.Init(string(bs))
-				f.writers[i] = writer
-			}
-		}
-	}
-
-	return nil
-}
-
-func (f *multiFileLogWriter) Destroy() {
-	for i := 0; i < len(f.writers); i++ {
-		if f.writers[i] != nil {
-			f.writers[i].Destroy()
-		}
-	}
-}
-
-func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
-	if f.fullLogWriter != nil {
-		f.fullLogWriter.WriteMsg(when, msg, level)
-	}
-	for i := 0; i < len(f.writers)-1; i++ {
-		if f.writers[i] != nil {
-			if level == f.writers[i].Level {
-				f.writers[i].WriteMsg(when, msg, level)
-			}
-		}
-	}
-	return nil
-}
-
-func (f *multiFileLogWriter) Flush() {
-	for i := 0; i < len(f.writers); i++ {
-		if f.writers[i] != nil {
-			f.writers[i].Flush()
-		}
-	}
-}
-
-// newFilesWriter creates a multiFileLogWriter and returns it as a Logger.
-func newFilesWriter() Logger {
-	return &multiFileLogWriter{}
-}
-
-func init() {
-	Register(AdapterMultiFile, newFilesWriter)
-}
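
For reference, a minimal sketch of how the multifile adapter removed above could be configured; "separate" lists the levels that get their own per-level files, and the file name and retention values are illustrative assumptions:

```go
package main

import (
	log "github.com/fatedier/beego/logs"
)

func main() {
	l := log.NewLogger()
	// Everything goes to app.log; levels listed under "separate" additionally
	// get their own files, e.g. app.error.log and app.debug.log.
	cfg := `{"filename":"app.log","daily":true,"maxdays":15,"rotate":true,"separate":["error","debug"]}`
	if err := l.SetLogger(log.AdapterMultiFile, cfg); err != nil {
		panic(err)
	}
	l.Error("written to app.log and app.error.log")
	l.Debug("written to app.log and app.debug.log")
	l.Close()
}
```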

Some files were not shown because too many files changed in this diff