Compare commits

..

4 Commits

Author SHA1 Message Date
abd0575d71 Save 2025-03-21 23:55:47 +03:00
206da7c7b7 Save 2025-03-21 23:24:24 +03:00
b6068c5aa1 Save 2025-03-21 22:42:45 +03:00
3ecffa7779 Save state 2025-03-21 21:47:32 +03:00
73 changed files with 13429 additions and 20 deletions

59
.dockerignore Normal file

@@ -0,0 +1,59 @@
# .dockerignore file for the project
# Language-specific patterns
/vendor/
*.test
.go-cache
# Development artifacts
.idea/
.vscode/
*.swp
*.swo
dist/
build/
out/
test/
tests/
*_test.go
debug/
*.log
# Version control
.git/
.gitignore
# Environment and secrets
.env*
*.env
*.pem
*.key
*.crt
config.local.*
*.local.yml
# Project-specific patterns
docs/
*.md
README*
Dockerfile*
docker-compose*
tmp/
temp/
*.tmp
.local/
local/
# Exclude the following files from being ignored
!go.mod
!go.sum
!cmd/main.go
!internal/config/config.go
!internal/domain/tasks/task.go
!internal/domain/users/user.go
!internal/persistance/task_manager.go
!internal/persistance/task_repository.go
!internal/persistance/user_repository.go
!database/init/00-users-init.sql
!database/init/01-tasks-init.sql
!deployments/task-manager.yaml

22
Dockerfile Normal file

@@ -0,0 +1,22 @@
# Build stage
FROM golang:1.21-alpine AS builder
# Set the working directory
WORKDIR /app
# Copy the module files first so dependencies are cached, then the rest of the project
COPY go.mod go.sum ./
RUN go mod download
COPY . ./
# Build the binary (CGO disabled so the static binary can run in the scratch image)
RUN CGO_ENABLED=0 go build -o /task_manager ./cmd/task_manager
# Final minimal image
FROM scratch AS final
WORKDIR /app
COPY --from=builder /task_manager .
EXPOSE 8080
CMD ["./task_manager"]

35
README.md Normal file

@@ -0,0 +1,35 @@
# Running the Project with Docker
This section provides instructions to build and run the project using Docker.
## Requirements
- Docker version 20.10 or later
- Docker Compose version 1.29 or later
## Environment Variables
- `POSTGRES_USER`: Database username (set to `postgres` in `compose.yaml`)
- `POSTGRES_PASSWORD`: Database password (set to `postgres` in `compose.yaml`)
- `POSTGRES_DB`: Database name (not set in `compose.yaml`, so PostgreSQL falls back to the username, `postgres`)
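The application itself currently connects using a fixed `config.CONNECTION_STRING` constant (see `cmd/task_manager/main.go`). Purely as an illustration, a connection string could instead be assembled from the variables above; the `POSTGRES_HOST` variable and every name in this sketch are assumptions, not part of the repository:
```go
// Hypothetical sketch only: the project uses config.CONNECTION_STRING instead.
package config

import (
	"fmt"
	"os"
)

func getenv(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// ConnectionString builds a lib/pq DSN from the environment variables above.
func ConnectionString() string {
	return fmt.Sprintf(
		"host=%s port=5432 user=%s password=%s dbname=%s sslmode=disable",
		getenv("POSTGRES_HOST", "database"), // compose service name assumed
		getenv("POSTGRES_USER", "postgres"),
		getenv("POSTGRES_PASSWORD", "postgres"),
		getenv("POSTGRES_DB", "postgres"),
	)
}
```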
## Build and Run Instructions
1. Clone the repository and navigate to the project root directory.
2. Build and start the services using Docker Compose:
```bash
docker-compose up --build
```
3. Access the application at `http://localhost:8080`.
## Configuration
- The application binary is built with Go 1.21 (the `golang:1.21-alpine` builder stage in the Dockerfile).
- The database service uses the `postgres:latest` image.
## Exposed Ports
- Application: `8080` (mapped to `8080` on the host)
- Database: `5432` (mapped to `5432` on the host)
- Swagger UI: `8080` in the container (mapped to `8081` on the host)
For further details, refer to the project documentation or contact the maintainers.
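Once the stack is up, the API described in `config/swagger.yaml` can be smoke-tested directly. A minimal Go sketch, assuming the `/api/v1/user/login` route and request fields from that spec (the credentials are illustrative):
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Request body fields follow the /user/login schema in config/swagger.yaml.
	body, err := json.Marshal(map[string]string{"name": "johndoe", "password": "12345"})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post("http://localhost:8080/api/v1/user/login",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```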


@@ -1,7 +0,0 @@
package main
import "fmt"
func main() {
fmt.Println("Hello")
}

50
cmd/task_manager/main.go Normal file

@@ -0,0 +1,50 @@
package main

import (
	"database/sql"
	"fmt"
	"log"
	"net/http"
	"time"

	_ "github.com/lib/pq"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"

	"task_manager/internal/app"
	"task_manager/internal/config"
	"task_manager/internal/persistance"
)

func main() {
	r := mux.NewRouter()

	db, err := sql.Open("postgres", config.CONNECTION_STRING)
	if err != nil {
		log.Fatalf("Failed to open database: %v", err)
	}
	defer db.Close()

	repo := persistance.NewTaskManager(db)
	task := app.NewTaskManager(repo)
	task.RegisterRoutes(r)

	corsMiddleware := handlers.CORS(
		handlers.AllowedOrigins([]string{"*"}),
		handlers.AllowedMethods([]string{"GET", "POST", "OPTIONS"}),
		handlers.AllowedHeaders([]string{"Content-Type", "Authorization"}),
	)

	address := fmt.Sprintf(":%d", 8080)
	log.Printf("The server is running on %s\n", address)

	srv := &http.Server{
		Addr:         address,
		Handler:      corsMiddleware(r),
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
		IdleTimeout:  20 * time.Second,
	}
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("Server startup error: %v", err)
	}
}

30
compose.yaml Normal file

@@ -0,0 +1,30 @@
services:
  app:
    build:
      context: .
    ports:
      - 8080:8080
    restart: unless-stopped
    depends_on:
      - database

  database:
    image: postgres:latest
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
    ports:
      - 5432:5432
    volumes:
      - ./database/init:/docker-entrypoint-initdb.d
    restart: unless-stopped

  swagger-ui:
    image: swaggerapi/swagger-ui:latest
    container_name: swagger-ui
    ports:
      - "8081:8080"
    volumes:
      - ./config/swagger.yaml:/tmp/swagger.yaml:ro
    environment:
      - SWAGGER_JSON=/tmp/swagger.yaml
    restart: unless-stopped

254
config/swagger.yaml Normal file

@@ -0,0 +1,254 @@
openapi: "3.0.0"
info:
  title: "Task Manager API"
  description: "API for managing tasks, projects, and users"
  version: "1.0.0"
servers:
  - url: "http://localhost:8080/api/v1"
    description: "Local server for testing the API"
paths:
  /user/login:
    post:
      summary: "Log a user in"
      description: "Authenticate a user by name and password"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                name:
                  type: string
                  example: "johndoe"
                  description: "Username"
                password:
                  type: string
                  example: "12345"
                  description: "User password"
      responses:
        "200":
          description: "Authentication successful"
          content:
            application/json:
              schema:
                type: object
                properties:
                  id:
                    type: integer
                  name:
                    type: string
                  email:
                    type: string
        "401":
          description: "Invalid username or password"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /user/register:
    post:
      summary: "Register a new user"
      description: "Create a new user account"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: "#/components/schemas/User"
      responses:
        "201":
          description: "User registered successfully"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/User"
        "400":
          description: "Malformed request"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /user/tasks:
    get:
      summary: "Get a user's tasks"
      description: "Returns the list of tasks for the specified user"
      parameters:
        - name: "id_user"
          in: query
          required: true
          schema:
            type: integer
            example: 1
          description: "User ID"
      responses:
        "200":
          description: "The user's task list"
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: "#/components/schemas/Task"
        "400":
          description: "Missing user ID"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /user/projects:
    get:
      summary: "Get a user's projects"
      description: "Returns the list of projects associated with the user"
      parameters:
        - name: "id_user"
          in: query
          required: true
          schema:
            type: integer
            example: 1
          description: "User ID"
      responses:
        "200":
          description: "List of projects"
          content:
            application/json:
              schema:
                type: array
                items:
                  type: string
        "400":
          description: "Missing user ID"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /projects:
    post:
      summary: "Create a project"
      description: "Create a new project (under development)"
      responses:
        "501":
          description: "Not implemented"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/UnimplementedResponse"
    get:
      summary: "Get projects"
      description: "Get the list of all projects (under development)"
      responses:
        "501":
          description: "Not implemented"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/UnimplementedResponse"
  /projects/tasks:
    post:
      summary: "Create a task"
      description: "Create a new task for a project"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: "#/components/schemas/Task"
      responses:
        "201":
          description: "Task created successfully"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Task"
        "400":
          description: "Malformed request"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /projects/tasks/{task_id}:
    patch:
      summary: "Update a task"
      description: "Update a task's data by ID"
      parameters:
        - name: "task_id"
          in: path
          required: true
          schema:
            type: integer
            example: 42
          description: "Task ID"
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: "#/components/schemas/Task"
      responses:
        "200":
          description: "Task updated successfully"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/Task"
        "400":
          description: "Malformed request"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/ErrorResponse"
  /projects/sprints:
    post:
      summary: "Create a sprint"
      description: "Create a sprint in a project (under development)"
      responses:
        "501":
          description: "Not implemented"
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/UnimplementedResponse"
components:
  schemas:
    ErrorResponse:
      type: object
      properties:
        code:
          type: integer
          description: "HTTP error code"
        message:
          type: string
          description: "Error message"
        error:
          type: string
          description: "Machine-readable error description"
    UnimplementedResponse:
      type: object
      properties:
        message:
          type: string
          example: "Not implemented"
    User:
      type: object
      properties:
        id:
          type: integer
        name:
          type: string
        email:
          type: string
        password:
          type: string
    Task:
      type: object
      properties:
        id:
          type: integer
        name:
          type: string
        description:
          type: string
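For reference, the `User`, `Task`, and `ErrorResponse` schemas above correspond to the Go types the handlers work with (`users.User`, `tasks.Task`, and the app package's `ErrorResponse`). A rough sketch of matching structs, with JSON tags assumed rather than copied from the repository:
```go
// Sketch only; the real definitions live in internal/domain and internal/app.
package domain

// User mirrors #/components/schemas/User.
type User struct {
	Id       int    `json:"id"`
	Name     string `json:"name"`
	Email    string `json:"email"`
	Password string `json:"password"`
}

// Task mirrors #/components/schemas/Task.
type Task struct {
	Id          int    `json:"id"`
	Name        string `json:"name"`
	Description string `json:"description"`
}

// ErrorResponse mirrors #/components/schemas/ErrorResponse.
type ErrorResponse struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Error   string `json:"error"`
}
```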

7
go.mod

@@ -3,6 +3,9 @@ module task_manager
go 1.19
require (
github.com/gorilla/mux v1.8.1 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
github.com/lib/pq v1.10.9
)
require github.com/felixge/httpsnoop v1.0.3 // indirect

4
go.sum

@@ -1,3 +1,7 @@
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=


@@ -1,7 +1,13 @@
package app
import (
"encoding/json"
"errors"
"net/http"
"strconv"
"task_manager/internal/domain/tasks"
"task_manager/internal/domain/users"
"task_manager/internal/persistance"
gorilla "github.com/gorilla/mux"
)
@@ -35,16 +41,174 @@ func (rm *TaskManager) RegisterRoutes(router *gorilla.Router) {
}
func (rm *TaskManager) handleLogin(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleRegistrateUser(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleUserTasks(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleUserProjects(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleAssignUserToProject(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleLogin(w http.ResponseWriter, r *http.Request) {
var input struct {
Name string `json:"name"`
Password string `json:"password"`
}
func (rm *TaskManager) handleCreateProject(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleCreateTask(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleCreateSprint(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleGetProjectUsers(w http.ResponseWriter, r *http.Request) {}
err := json.NewDecoder(r.Body).Decode(&input)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload", err)
return
}
func (rm *TaskManager) handleUpdateTask(w http.ResponseWriter, r *http.Request) {}
func (rm *TaskManager) handleGetProjects(w http.ResponseWriter, r *http.Request) {}
user, err := rm.repo.GetUser(input.Name)
if err != nil || user.Password != persistance.GetMD5Hash(input.Password) {
respondWithError(w, http.StatusUnauthorized, "Invalid username or password", errors.New("authentication failed"))
return
}
respondWithJSON(w, http.StatusOK, user)
}
func (rm *TaskManager) handleRegistrateUser(w http.ResponseWriter, r *http.Request) {
var user users.User
err := json.NewDecoder(r.Body).Decode(&user)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload", err)
return
}
err = rm.repo.AddUser(&user)
if err != nil {
respondWithError(w, http.StatusInternalServerError, "Failed to create user", err)
return
}
respondWithJSON(w, http.StatusCreated, user)
}
func (rm *TaskManager) handleUserTasks(w http.ResponseWriter, r *http.Request) {
userID := r.URL.Query().Get("id_user")
if userID == "" {
respondWithError(w, http.StatusBadRequest, "Missing user ID", errors.New("id_user parameter required"))
return
}
id, err := strconv.Atoi(userID)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid user ID", err)
return
}
tasks, err := rm.repo.GetUserTasks(id)
if err != nil {
respondWithError(w, http.StatusInternalServerError, "Failed to fetch user tasks", err)
return
}
respondWithJSON(w, http.StatusOK, tasks)
}
func (rm *TaskManager) handleUserProjects(w http.ResponseWriter, r *http.Request) {
userID := r.URL.Query().Get("id_user")
if userID == "" {
respondWithError(w, http.StatusBadRequest, "Missing user ID", errors.New("id_user parameter required"))
return
}
_, err := strconv.Atoi(userID)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid user ID", err)
return
}
// Stub: a repository method for fetching the user's projects is assumed here;
// it still needs to be added before this can return real data.
projects := []string{"Project 1", "Project 2"} // sample data
respondWithJSON(w, http.StatusOK, projects)
}
func (rm *TaskManager) handleAssignUserToProject(w http.ResponseWriter, r *http.Request) {
var input struct {
IdUser int `json:"id_user"`
IdProject int `json:"id_project"`
}
err := json.NewDecoder(r.Body).Decode(&input)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload", err)
return
}
// Stub: assigning a user to a project is not implemented yet;
// the corresponding repository method still needs to be added.
respondWithJSON(w, http.StatusOK, map[string]string{
"message": "User assigned to project successfully",
})
}
func (rm *TaskManager) handleCreateProject(w http.ResponseWriter, r *http.Request) {
respondWithJSON(w, http.StatusNotImplemented, map[string]string{
"message": "Create project not implemented yet",
})
}
func (rm *TaskManager) handleCreateTask(w http.ResponseWriter, r *http.Request) {
var task tasks.Task
err := json.NewDecoder(r.Body).Decode(&task)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload", err)
return
}
err = rm.repo.AddTask(task)
if err != nil {
respondWithError(w, http.StatusInternalServerError, "Failed to create task", err)
return
}
respondWithJSON(w, http.StatusCreated, task)
}
func (rm *TaskManager) handleCreateSprint(w http.ResponseWriter, r *http.Request) {
respondWithJSON(w, http.StatusNotImplemented, map[string]string{
"message": "Create sprint not implemented yet",
})
}
func (rm *TaskManager) handleGetProjectUsers(w http.ResponseWriter, r *http.Request) {
respondWithJSON(w, http.StatusNotImplemented, map[string]string{
"message": "Get project users not implemented yet",
})
}
func (rm *TaskManager) handleUpdateTask(w http.ResponseWriter, r *http.Request) {
taskID := gorilla.Vars(r)["task_id"]
id, err := strconv.Atoi(taskID)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid task ID", err)
return
}
var task tasks.Task
err = json.NewDecoder(r.Body).Decode(&task)
if err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload", err)
return
}
task.Id = id
err = rm.repo.UpdateTask(task)
if err != nil {
respondWithError(w, http.StatusInternalServerError, "Failed to update task", err)
return
}
respondWithJSON(w, http.StatusOK, task)
}
func (rm *TaskManager) handleGetProjects(w http.ResponseWriter, r *http.Request) {
respondWithJSON(w, http.StatusNotImplemented, map[string]string{
"message": "Get projects not implemented yet",
})
}
func respondWithError(w http.ResponseWriter, code int, message string, err error) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
json.NewEncoder(w).Encode(ErrorResponse{
Code: code,
Message: message,
Error: err.Error(),
})
}
func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
json.NewEncoder(w).Encode(payload)
}
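A minimal test sketch for the JSON helpers above, using `net/http/httptest`. It assumes it sits in package `app` next to `respondWithError`, and that `ErrorResponse` exposes the `Code` and `Message` fields used there:
```go
package app

import (
	"encoding/json"
	"errors"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestRespondWithError(t *testing.T) {
	rec := httptest.NewRecorder()
	respondWithError(rec, http.StatusBadRequest, "Invalid request payload", errors.New("boom"))

	if rec.Code != http.StatusBadRequest {
		t.Fatalf("expected status 400, got %d", rec.Code)
	}
	var body ErrorResponse
	if err := json.NewDecoder(rec.Body).Decode(&body); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	if body.Code != http.StatusBadRequest || body.Message != "Invalid request payload" {
		t.Fatalf("unexpected body: %+v", body)
	}
}
```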

0
vendor/github.com/felixge/httpsnoop/.gitignore generated vendored Normal file

6
vendor/github.com/felixge/httpsnoop/.travis.yml generated vendored Normal file

@@ -0,0 +1,6 @@
language: go
go:
- 1.6
- 1.7
- 1.8

19
vendor/github.com/felixge/httpsnoop/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,19 @@
Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

10
vendor/github.com/felixge/httpsnoop/Makefile generated vendored Normal file

@@ -0,0 +1,10 @@
.PHONY: ci generate clean
ci: clean generate
go test -v ./...
generate:
go generate .
clean:
rm -rf *_generated*.go

95
vendor/github.com/felixge/httpsnoop/README.md generated vendored Normal file

@@ -0,0 +1,95 @@
# httpsnoop
Package httpsnoop provides an easy way to capture http related metrics (i.e.
response time, bytes written, and http status code) from your application's
http.Handlers.
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop)
[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop)
## Usage Example
```go
// myH is your app's http handler, perhaps a http.ServeMux or similar.
var myH http.Handler
// wrappedH wraps myH in order to log every request.
wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
m := httpsnoop.CaptureMetrics(myH, w, r)
log.Printf(
"%s %s (code=%d dt=%s written=%d)",
r.Method,
r.URL,
m.Code,
m.Duration,
m.Written,
)
})
http.ListenAndServe(":8080", wrappedH)
```
## Why this package exists
Instrumenting an application's http.Handler is surprisingly difficult.
However if you google for e.g. "capture ResponseWriter status code" you'll find
lots of advice and code examples suggesting it is a fairly trivial
undertaking. Unfortunately everything I've seen so far has a high chance of
breaking your application.
The main problem is that a `http.ResponseWriter` often implements additional
interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
in your own struct that also implements the `http.ResponseWriter` interface
will hide the additional interfaces mentioned above. This has a high chance of
introducing subtle bugs into any non-trivial application.
Another approach I've seen people take is to return a struct that implements
all of the interfaces above. However, that's also problematic, because it's
difficult to fake some of these interfaces behaviors when the underlying
`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
because an application may choose to operate differently, merely because it
detects the presence of these additional interfaces.
This package solves this problem by checking which additional interfaces a
`http.ResponseWriter` implements, returning a wrapped version implementing the
exact same set of interfaces.
Additionally this package properly handles edge cases such as `WriteHeader` not
being called, or called more than once, as well as concurrent calls to
`http.ResponseWriter` methods, and even calls happening after the wrapped
`ServeHTTP` has already returned.
Unfortunately this package is not perfect either. It's possible that it is
still missing some interfaces provided by the go core (let me know if you find
one), and it won't work for applications adding their own interfaces into the
mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
`http.ResponseWriter` and type-assert the result to its other interfaces.
However, hopefully the explanation above has sufficiently scared you of rolling
your own solution to this problem. httpsnoop may still break your application,
but at least it tries to avoid it as much as possible.
Anyway, the real problem here is that smuggling additional interfaces inside
`http.ResponseWriter` is a problematic design choice, but it probably goes as
deep as the Go language specification itself. But that's okay, I still prefer
Go over the alternatives ;).
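For instance, a handler that needs `http.Flusher` even when a wrapper does not re-expose it can go through `Unwrap`; a small sketch (the handler and port are illustrative):
```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Reach the original ResponseWriter behind any httpsnoop layers and
		// type-assert an interface the wrapper may not expose directly.
		if f, ok := httpsnoop.Unwrap(w).(http.Flusher); ok {
			f.Flush()
		}
		w.Write([]byte("ok"))
	})
	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m := httpsnoop.CaptureMetrics(inner, w, r)
		log.Printf("code=%d written=%d dt=%s", m.Code, m.Written, m.Duration)
	})
	log.Fatal(http.ListenAndServe(":8080", wrapped))
}
```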
## Performance
```
BenchmarkBaseline-8 20000 94912 ns/op
BenchmarkCaptureMetrics-8 20000 95461 ns/op
```
As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
overhead of ~500 ns per http request on my machine. However, the margin of
error appears to be larger than that, therefore it should be reasonable to
assume that the overhead introduced by `CaptureMetrics` is absolutely
negligible.
## License
MIT

86
vendor/github.com/felixge/httpsnoop/capture_metrics.go generated vendored Normal file

@@ -0,0 +1,86 @@
package httpsnoop
import (
"io"
"net/http"
"time"
)
// Metrics holds metrics captured from CaptureMetrics.
type Metrics struct {
// Code is the first http response code passed to the WriteHeader func of
// the ResponseWriter. If no such call is made, a default code of 200 is
// assumed instead.
Code int
// Duration is the time it took to execute the handler.
Duration time.Duration
// Written is the number of bytes successfully written by the Write or
// ReadFrom function of the ResponseWriter. ResponseWriters may also write
// data to their underlying connection directly (e.g. headers), but those
// are not tracked. Therefore the number of Written bytes will usually match
// the size of the response body.
Written int64
}
// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
// returns the metrics it captured from it.
func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
hnd.ServeHTTP(ww, r)
})
}
// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
// resulting metrics. This is very similar to CaptureMetrics (which is just
// sugar on top of this func), but is a more usable interface if your
// application doesn't use the Go http.Handler interface.
func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
m := Metrics{Code: http.StatusOK}
m.CaptureMetrics(w, fn)
return m
}
// CaptureMetrics wraps w and calls fn with the wrapped w and updates
// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
// but allows one to customize starting Metrics object.
func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
var (
start = time.Now()
headerWritten bool
hooks = Hooks{
WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
return func(code int) {
next(code)
if !headerWritten {
m.Code = code
headerWritten = true
}
}
},
Write: func(next WriteFunc) WriteFunc {
return func(p []byte) (int, error) {
n, err := next(p)
m.Written += int64(n)
headerWritten = true
return n, err
}
},
ReadFrom: func(next ReadFromFunc) ReadFromFunc {
return func(src io.Reader) (int64, error) {
n, err := next(src)
headerWritten = true
m.Written += n
return n, err
}
},
}
)
fn(Wrap(w, hooks))
m.Duration += time.Since(start)
}

10
vendor/github.com/felixge/httpsnoop/docs.go generated vendored Normal file

@@ -0,0 +1,10 @@
// Package httpsnoop provides an easy way to capture http related metrics (i.e.
// response time, bytes written, and http status code) from your application's
// http.Handlers.
//
// Doing this requires non-trivial wrapping of the http.ResponseWriter
// interface, which is also exposed for users interested in a more low-level
// API.
package httpsnoop
//go:generate go run codegen/main.go


@@ -0,0 +1,436 @@
// +build go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
import (
"bufio"
"io"
"net"
"net/http"
)
// HeaderFunc is part of the http.ResponseWriter interface.
type HeaderFunc func() http.Header
// WriteHeaderFunc is part of the http.ResponseWriter interface.
type WriteHeaderFunc func(code int)
// WriteFunc is part of the http.ResponseWriter interface.
type WriteFunc func(b []byte) (int, error)
// FlushFunc is part of the http.Flusher interface.
type FlushFunc func()
// CloseNotifyFunc is part of the http.CloseNotifier interface.
type CloseNotifyFunc func() <-chan bool
// HijackFunc is part of the http.Hijacker interface.
type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
// ReadFromFunc is part of the io.ReaderFrom interface.
type ReadFromFunc func(src io.Reader) (int64, error)
// PushFunc is part of the http.Pusher interface.
type PushFunc func(target string, opts *http.PushOptions) error
// Hooks defines a set of method interceptors for methods included in
// http.ResponseWriter as well as some others. You can think of them as
// middleware for the function calls they target. See Wrap for more details.
type Hooks struct {
Header func(HeaderFunc) HeaderFunc
WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
Write func(WriteFunc) WriteFunc
Flush func(FlushFunc) FlushFunc
CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
Hijack func(HijackFunc) HijackFunc
ReadFrom func(ReadFromFunc) ReadFromFunc
Push func(PushFunc) PushFunc
}
// Wrap returns a wrapped version of w that provides the exact same interface
// as w. Specifically if w implements any combination of:
//
// - http.Flusher
// - http.CloseNotifier
// - http.Hijacker
// - io.ReaderFrom
// - http.Pusher
//
// The wrapped version will implement the exact same combination. If no hooks
// are set, the wrapped version also behaves exactly as w. Hooks targeting
// methods not supported by w are ignored. Any other hooks will intercept the
// method they target and may modify the call's arguments and/or return values.
// The CaptureMetrics implementation serves as a working example for how the
// hooks can be used.
func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
rw := &rw{w: w, h: hooks}
_, i0 := w.(http.Flusher)
_, i1 := w.(http.CloseNotifier)
_, i2 := w.(http.Hijacker)
_, i3 := w.(io.ReaderFrom)
_, i4 := w.(http.Pusher)
switch {
// combination 1/32
case !i0 && !i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
}{rw, rw}
// combination 2/32
case !i0 && !i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Pusher
}{rw, rw, rw}
// combination 3/32
case !i0 && !i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
}{rw, rw, rw}
// combination 4/32
case !i0 && !i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw}
// combination 5/32
case !i0 && !i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
}{rw, rw, rw}
// combination 6/32
case !i0 && !i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
http.Pusher
}{rw, rw, rw, rw}
// combination 7/32
case !i0 && !i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 8/32
case !i0 && !i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 9/32
case !i0 && i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
}{rw, rw, rw}
// combination 10/32
case !i0 && i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw}
// combination 11/32
case !i0 && i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 12/32
case !i0 && i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 13/32
case !i0 && i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 14/32
case !i0 && i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 15/32
case !i0 && i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 16/32
case !i0 && i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 17/32
case i0 && !i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 18/32
case i0 && !i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Pusher
}{rw, rw, rw, rw}
// combination 19/32
case i0 && !i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 20/32
case i0 && !i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 21/32
case i0 && !i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 22/32
case i0 && !i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 23/32
case i0 && !i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 24/32
case i0 && !i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 25/32
case i0 && i1 && !i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 26/32
case i0 && i1 && !i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Pusher
}{rw, rw, rw, rw, rw}
// combination 27/32
case i0 && i1 && !i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 28/32
case i0 && i1 && !i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 29/32
case i0 && i1 && i2 && !i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 30/32
case i0 && i1 && i2 && !i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
http.Pusher
}{rw, rw, rw, rw, rw, rw}
// combination 31/32
case i0 && i1 && i2 && i3 && !i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
// combination 32/32
case i0 && i1 && i2 && i3 && i4:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
http.Pusher
}{rw, rw, rw, rw, rw, rw, rw}
}
panic("unreachable")
}
type rw struct {
w http.ResponseWriter
h Hooks
}
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil {
f = w.h.Header(f)
}
return f()
}
func (w *rw) WriteHeader(code int) {
f := w.w.(http.ResponseWriter).WriteHeader
if w.h.WriteHeader != nil {
f = w.h.WriteHeader(f)
}
f(code)
}
func (w *rw) Write(b []byte) (int, error) {
f := w.w.(http.ResponseWriter).Write
if w.h.Write != nil {
f = w.h.Write(f)
}
return f(b)
}
func (w *rw) Flush() {
f := w.w.(http.Flusher).Flush
if w.h.Flush != nil {
f = w.h.Flush(f)
}
f()
}
func (w *rw) CloseNotify() <-chan bool {
f := w.w.(http.CloseNotifier).CloseNotify
if w.h.CloseNotify != nil {
f = w.h.CloseNotify(f)
}
return f()
}
func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
f := w.w.(http.Hijacker).Hijack
if w.h.Hijack != nil {
f = w.h.Hijack(f)
}
return f()
}
func (w *rw) ReadFrom(src io.Reader) (int64, error) {
f := w.w.(io.ReaderFrom).ReadFrom
if w.h.ReadFrom != nil {
f = w.h.ReadFrom(f)
}
return f(src)
}
func (w *rw) Push(target string, opts *http.PushOptions) error {
f := w.w.(http.Pusher).Push
if w.h.Push != nil {
f = w.h.Push(f)
}
return f(target, opts)
}
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}


@@ -0,0 +1,278 @@
// +build !go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
import (
"bufio"
"io"
"net"
"net/http"
)
// HeaderFunc is part of the http.ResponseWriter interface.
type HeaderFunc func() http.Header
// WriteHeaderFunc is part of the http.ResponseWriter interface.
type WriteHeaderFunc func(code int)
// WriteFunc is part of the http.ResponseWriter interface.
type WriteFunc func(b []byte) (int, error)
// FlushFunc is part of the http.Flusher interface.
type FlushFunc func()
// CloseNotifyFunc is part of the http.CloseNotifier interface.
type CloseNotifyFunc func() <-chan bool
// HijackFunc is part of the http.Hijacker interface.
type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
// ReadFromFunc is part of the io.ReaderFrom interface.
type ReadFromFunc func(src io.Reader) (int64, error)
// Hooks defines a set of method interceptors for methods included in
// http.ResponseWriter as well as some others. You can think of them as
// middleware for the function calls they target. See Wrap for more details.
type Hooks struct {
Header func(HeaderFunc) HeaderFunc
WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
Write func(WriteFunc) WriteFunc
Flush func(FlushFunc) FlushFunc
CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
Hijack func(HijackFunc) HijackFunc
ReadFrom func(ReadFromFunc) ReadFromFunc
}
// Wrap returns a wrapped version of w that provides the exact same interface
// as w. Specifically if w implements any combination of:
//
// - http.Flusher
// - http.CloseNotifier
// - http.Hijacker
// - io.ReaderFrom
//
// The wrapped version will implement the exact same combination. If no hooks
// are set, the wrapped version also behaves exactly as w. Hooks targeting
// methods not supported by w are ignored. Any other hooks will intercept the
// method they target and may modify the call's arguments and/or return values.
// The CaptureMetrics implementation serves as a working example for how the
// hooks can be used.
func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
rw := &rw{w: w, h: hooks}
_, i0 := w.(http.Flusher)
_, i1 := w.(http.CloseNotifier)
_, i2 := w.(http.Hijacker)
_, i3 := w.(io.ReaderFrom)
switch {
// combination 1/16
case !i0 && !i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
}{rw, rw}
// combination 2/16
case !i0 && !i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
io.ReaderFrom
}{rw, rw, rw}
// combination 3/16
case !i0 && !i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
}{rw, rw, rw}
// combination 4/16
case !i0 && !i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 5/16
case !i0 && i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
}{rw, rw, rw}
// combination 6/16
case !i0 && i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 7/16
case !i0 && i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw}
// combination 8/16
case !i0 && i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 9/16
case i0 && !i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
}{rw, rw, rw}
// combination 10/16
case i0 && !i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
io.ReaderFrom
}{rw, rw, rw, rw}
// combination 11/16
case i0 && !i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
}{rw, rw, rw, rw}
// combination 12/16
case i0 && !i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 13/16
case i0 && i1 && !i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
}{rw, rw, rw, rw}
// combination 14/16
case i0 && i1 && !i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
io.ReaderFrom
}{rw, rw, rw, rw, rw}
// combination 15/16
case i0 && i1 && i2 && !i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
}{rw, rw, rw, rw, rw}
// combination 16/16
case i0 && i1 && i2 && i3:
return struct {
Unwrapper
http.ResponseWriter
http.Flusher
http.CloseNotifier
http.Hijacker
io.ReaderFrom
}{rw, rw, rw, rw, rw, rw}
}
panic("unreachable")
}
type rw struct {
w http.ResponseWriter
h Hooks
}
func (w *rw) Unwrap() http.ResponseWriter {
return w.w
}
func (w *rw) Header() http.Header {
f := w.w.(http.ResponseWriter).Header
if w.h.Header != nil {
f = w.h.Header(f)
}
return f()
}
func (w *rw) WriteHeader(code int) {
f := w.w.(http.ResponseWriter).WriteHeader
if w.h.WriteHeader != nil {
f = w.h.WriteHeader(f)
}
f(code)
}
func (w *rw) Write(b []byte) (int, error) {
f := w.w.(http.ResponseWriter).Write
if w.h.Write != nil {
f = w.h.Write(f)
}
return f(b)
}
func (w *rw) Flush() {
f := w.w.(http.Flusher).Flush
if w.h.Flush != nil {
f = w.h.Flush(f)
}
f()
}
func (w *rw) CloseNotify() <-chan bool {
f := w.w.(http.CloseNotifier).CloseNotify
if w.h.CloseNotify != nil {
f = w.h.CloseNotify(f)
}
return f()
}
func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
f := w.w.(http.Hijacker).Hijack
if w.h.Hijack != nil {
f = w.h.Hijack(f)
}
return f()
}
func (w *rw) ReadFrom(src io.Reader) (int64, error) {
f := w.w.(io.ReaderFrom).ReadFrom
if w.h.ReadFrom != nil {
f = w.h.ReadFrom(f)
}
return f(src)
}
type Unwrapper interface {
Unwrap() http.ResponseWriter
}
// Unwrap returns the underlying http.ResponseWriter from within zero or more
// layers of httpsnoop wrappers.
func Unwrap(w http.ResponseWriter) http.ResponseWriter {
if rw, ok := w.(Unwrapper); ok {
// recurse until rw.Unwrap() returns a non-Unwrapper
return Unwrap(rw.Unwrap())
} else {
return w
}
}

20
vendor/github.com/gorilla/handlers/.editorconfig generated vendored Normal file

@@ -0,0 +1,20 @@
; https://editorconfig.org/
root = true
[*]
insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
indent_style = space
indent_size = 2
[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
indent_style = tab
indent_size = 4
[*.md]
indent_size = 4
trim_trailing_whitespace = false
eclint_indent_style = unset

2
vendor/github.com/gorilla/handlers/.gitignore generated vendored Normal file

@@ -0,0 +1,2 @@
# Output of the go test coverage tool
coverage.coverprofile

27
vendor/github.com/gorilla/handlers/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

34
vendor/github.com/gorilla/handlers/Makefile generated vendored Normal file

@@ -0,0 +1,34 @@
GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
GO_SEC=$(shell which gosec 2> /dev/null || echo '')
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
.PHONY: verify
verify: sec govulncheck lint test
.PHONY: lint
lint:
$(if $(GO_LINT), ,go install $(GO_LINT_URI))
@echo "##### Running golangci-lint #####"
golangci-lint run -v
.PHONY: sec
sec:
$(if $(GO_SEC), ,go install $(GO_SEC_URI))
@echo "##### Running gosec #####"
gosec ./...
.PHONY: govulncheck
govulncheck:
$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
@echo "##### Running govulncheck #####"
govulncheck ./...
.PHONY: test
test:
@echo "##### Running tests #####"
go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...

56
vendor/github.com/gorilla/handlers/README.md generated vendored Normal file

@@ -0,0 +1,56 @@
# gorilla/handlers
![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg)
[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers)
[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge)
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's `net/http` package (or any framework supporting `http.Handler`), including:
* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
Format](http://httpd.apache.org/docs/2.2/logs.html#common).
* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
both Apache and nginx.
* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
content types.
* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
`map[string]http.Handler`
* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
`X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
headers when running a Go server behind a HTTP reverse proxy.
* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple
domains (i.e. multiple CNAME aliases).
* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
Other handlers are documented [on the Gorilla
website](https://www.gorillatoolkit.org/pkg/handlers).
## Example
A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
```go
import (
"net/http"
"os"
"github.com/gorilla/handlers"
)
func main() {
r := http.NewServeMux()
// Only log requests to our admin dashboard to stdout
r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
r.HandleFunc("/", ShowIndex)
// Wrap our server with our gzip handler to gzip compress all responses.
http.ListenAndServe(":8000", handlers.CompressHandler(r))
}
```
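A CORS setup with explicit options looks like this; the option values mirror those used in `cmd/task_manager/main.go` of this repository, while the `/ping` handler is illustrative:
```go
package main

import (
	"net/http"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("pong"))
	})

	// Same option set as cmd/task_manager/main.go in this repository.
	cors := handlers.CORS(
		handlers.AllowedOrigins([]string{"*"}),
		handlers.AllowedMethods([]string{"GET", "POST", "OPTIONS"}),
		handlers.AllowedHeaders([]string{"Content-Type", "Authorization"}),
	)

	http.ListenAndServe(":8000", cors(r))
}
```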
## License
BSD licensed. See the included LICENSE file for details.

73
vendor/github.com/gorilla/handlers/canonical.go generated vendored Normal file

@@ -0,0 +1,73 @@
package handlers
import (
"net/http"
"net/url"
"strings"
)
type canonical struct {
h http.Handler
domain string
code int
}
// CanonicalHost is HTTP middleware that re-directs requests to the canonical
// domain. It accepts a domain and a status code (e.g. 301 or 302) and
// re-directs clients to this domain. The existing request path is maintained.
//
// Note: If the provided domain is considered invalid by url.Parse or otherwise
// returns an empty scheme or host, clients are not re-directed.
//
// Example:
//
// r := mux.NewRouter()
// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
// r.HandleFunc("/route", YourHandler)
//
// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
fn := func(h http.Handler) http.Handler {
return canonical{h, domain, code}
}
return fn
}
func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) {
dest, err := url.Parse(c.domain)
if err != nil {
// Call the next handler if the provided domain fails to parse.
c.h.ServeHTTP(w, r)
return
}
if dest.Scheme == "" || dest.Host == "" {
// Call the next handler if the scheme or host are empty.
// Note that url.Parse won't fail in this case.
c.h.ServeHTTP(w, r)
return
}
if !strings.EqualFold(cleanHost(r.Host), dest.Host) {
// Re-build the destination URL
dest := dest.Scheme + "://" + dest.Host + r.URL.Path
if r.URL.RawQuery != "" {
dest += "?" + r.URL.RawQuery
}
http.Redirect(w, r, dest, c.code)
return
}
c.h.ServeHTTP(w, r)
}
// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '.
// This is backported from Go 1.5 (in response to issue #11206) and attempts to
// mitigate malformed Host headers that do not match the format in RFC7230.
func cleanHost(in string) string {
if i := strings.IndexAny(in, " /"); i != -1 {
return in[:i]
}
return in
}

143
vendor/github.com/gorilla/handlers/compress.go generated vendored Normal file

@@ -0,0 +1,143 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"compress/flate"
"compress/gzip"
"io"
"net/http"
"strings"
"github.com/felixge/httpsnoop"
)
const acceptEncoding string = "Accept-Encoding"
type compressResponseWriter struct {
compressor io.Writer
w http.ResponseWriter
}
func (cw *compressResponseWriter) WriteHeader(c int) {
cw.w.Header().Del("Content-Length")
cw.w.WriteHeader(c)
}
func (cw *compressResponseWriter) Write(b []byte) (int, error) {
h := cw.w.Header()
if h.Get("Content-Type") == "" {
h.Set("Content-Type", http.DetectContentType(b))
}
h.Del("Content-Length")
return cw.compressor.Write(b)
}
func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) {
return io.Copy(cw.compressor, r)
}
type flusher interface {
Flush() error
}
func (cw *compressResponseWriter) Flush() {
// Flush compressed data if compressor supports it.
if f, ok := cw.compressor.(flusher); ok {
_ = f.Flush()
}
// Flush HTTP response.
if f, ok := cw.w.(http.Flusher); ok {
f.Flush()
}
}
// CompressHandler gzip compresses HTTP responses for clients that support it
// via the 'Accept-Encoding' header.
//
// Compressing TLS traffic may leak the page contents to an attacker if the
// page contains user input: http://security.stackexchange.com/a/102015/12208
func CompressHandler(h http.Handler) http.Handler {
return CompressHandlerLevel(h, gzip.DefaultCompression)
}
// CompressHandlerLevel gzip compresses HTTP responses with specified compression level
// for clients that support it via the 'Accept-Encoding' header.
//
// The compression level should be gzip.DefaultCompression, gzip.NoCompression,
// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive.
// gzip.DefaultCompression is used in case of invalid compression level.
func CompressHandlerLevel(h http.Handler, level int) http.Handler {
if level < gzip.DefaultCompression || level > gzip.BestCompression {
level = gzip.DefaultCompression
}
const (
gzipEncoding = "gzip"
flateEncoding = "deflate"
)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// detect what encoding to use
var encoding string
for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") {
curEnc = strings.TrimSpace(curEnc)
if curEnc == gzipEncoding || curEnc == flateEncoding {
encoding = curEnc
break
}
}
// always add Accept-Encoding to Vary to prevent intermediate caches corruption
w.Header().Add("Vary", acceptEncoding)
// if we weren't able to identify an encoding we're familiar with, pass on the
// request to the handler and return
if encoding == "" {
h.ServeHTTP(w, r)
return
}
if r.Header.Get("Upgrade") != "" {
h.ServeHTTP(w, r)
return
}
// wrap the ResponseWriter with the writer for the chosen encoding
var encWriter io.WriteCloser
if encoding == gzipEncoding {
encWriter, _ = gzip.NewWriterLevel(w, level)
} else if encoding == flateEncoding {
encWriter, _ = flate.NewWriter(w, level)
}
defer encWriter.Close()
w.Header().Set("Content-Encoding", encoding)
r.Header.Del(acceptEncoding)
cw := &compressResponseWriter{
w: w,
compressor: encWriter,
}
w = httpsnoop.Wrap(w, httpsnoop.Hooks{
Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
return cw.Write
},
WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
return cw.WriteHeader
},
Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
return cw.Flush
},
ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc {
return cw.ReadFrom
},
})
h.ServeHTTP(w, r)
})
}

352
vendor/github.com/gorilla/handlers/cors.go generated vendored Normal file

@@ -0,0 +1,352 @@
package handlers
import (
"net/http"
"strconv"
"strings"
)
// CORSOption represents a functional option for configuring the CORS middleware.
type CORSOption func(*cors) error
type cors struct {
h http.Handler
allowedHeaders []string
allowedMethods []string
allowedOrigins []string
allowedOriginValidator OriginValidator
exposedHeaders []string
maxAge int
ignoreOptions bool
allowCredentials bool
optionStatusCode int
}
// OriginValidator takes an origin string and returns whether or not that origin is allowed.
type OriginValidator func(string) bool
var (
defaultCorsOptionStatusCode = http.StatusOK
defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost}
defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
// (WebKit/Safari v9 sends the Origin header by default in AJAX requests).
)
const (
corsOptionMethod string = http.MethodOptions
corsAllowOriginHeader string = "Access-Control-Allow-Origin"
corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
corsMaxAgeHeader string = "Access-Control-Max-Age"
corsAllowMethodsHeader string = "Access-Control-Allow-Methods"
corsAllowHeadersHeader string = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials"
corsRequestMethodHeader string = "Access-Control-Request-Method"
corsRequestHeadersHeader string = "Access-Control-Request-Headers"
corsOriginHeader string = "Origin"
corsVaryHeader string = "Vary"
corsOriginMatchAll string = "*"
)
func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get(corsOriginHeader)
if !ch.isOriginAllowed(origin) {
if r.Method != corsOptionMethod || ch.ignoreOptions {
ch.h.ServeHTTP(w, r)
}
return
}
if r.Method == corsOptionMethod {
if ch.ignoreOptions {
ch.h.ServeHTTP(w, r)
return
}
if _, ok := r.Header[corsRequestMethodHeader]; !ok {
w.WriteHeader(http.StatusBadRequest)
return
}
method := r.Header.Get(corsRequestMethodHeader)
if !ch.isMatch(method, ch.allowedMethods) {
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",")
allowedHeaders := []string{}
for _, v := range requestHeaders {
canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) {
continue
}
if !ch.isMatch(canonicalHeader, ch.allowedHeaders) {
w.WriteHeader(http.StatusForbidden)
return
}
allowedHeaders = append(allowedHeaders, canonicalHeader)
}
if len(allowedHeaders) > 0 {
w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ","))
}
if ch.maxAge > 0 {
w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge))
}
if !ch.isMatch(method, defaultCorsMethods) {
w.Header().Set(corsAllowMethodsHeader, method)
}
} else if len(ch.exposedHeaders) > 0 {
w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
}
if ch.allowCredentials {
w.Header().Set(corsAllowCredentialsHeader, "true")
}
if len(ch.allowedOrigins) > 1 {
w.Header().Set(corsVaryHeader, corsOriginHeader)
}
returnOrigin := origin
if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 {
returnOrigin = "*"
} else {
for _, o := range ch.allowedOrigins {
// A configuration of * is different than explicitly setting an allowed
// origin. Returning arbitrary origin headers in an access control allow
// origin header is unsafe and is not required by any use case.
if o == corsOriginMatchAll {
returnOrigin = "*"
break
}
}
}
w.Header().Set(corsAllowOriginHeader, returnOrigin)
if r.Method == corsOptionMethod {
w.WriteHeader(ch.optionStatusCode)
return
}
ch.h.ServeHTTP(w, r)
}
// CORS provides Cross-Origin Resource Sharing middleware.
// Example:
//
// import (
// "net/http"
//
// "github.com/gorilla/handlers"
// "github.com/gorilla/mux"
// )
//
// func main() {
// r := mux.NewRouter()
// r.HandleFunc("/users", UserEndpoint)
// r.HandleFunc("/projects", ProjectEndpoint)
//
// // Apply the CORS middleware to our top-level router, with the defaults.
// http.ListenAndServe(":8000", handlers.CORS()(r))
// }
func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
ch := parseCORSOptions(opts...)
ch.h = h
return ch
}
}
func parseCORSOptions(opts ...CORSOption) *cors {
ch := &cors{
allowedMethods: defaultCorsMethods,
allowedHeaders: defaultCorsHeaders,
allowedOrigins: []string{},
optionStatusCode: defaultCorsOptionStatusCode,
}
for _, option := range opts {
_ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil?
}
return ch
}
//
// Functional options for configuring CORS.
//
// AllowedHeaders adds the provided headers to the list of allowed headers in a
// CORS request.
// This is an append operation so the headers Accept, Accept-Language,
// and Content-Language are always allowed.
// Content-Type must be explicitly declared if accepting Content-Types other than
// application/x-www-form-urlencoded, multipart/form-data, or text/plain.
func AllowedHeaders(headers []string) CORSOption {
return func(ch *cors) error {
for _, v := range headers {
normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if normalizedHeader == "" {
continue
}
if !ch.isMatch(normalizedHeader, ch.allowedHeaders) {
ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader)
}
}
return nil
}
}
// AllowedMethods can be used to explicitly allow methods in the
// Access-Control-Allow-Methods header.
// This is a replacement operation so you must also
// pass GET, HEAD, and POST if you wish to support those methods.
func AllowedMethods(methods []string) CORSOption {
return func(ch *cors) error {
ch.allowedMethods = []string{}
for _, v := range methods {
normalizedMethod := strings.ToUpper(strings.TrimSpace(v))
if normalizedMethod == "" {
continue
}
if !ch.isMatch(normalizedMethod, ch.allowedMethods) {
ch.allowedMethods = append(ch.allowedMethods, normalizedMethod)
}
}
return nil
}
}
// AllowedOrigins sets the allowed origins for CORS requests, as used in the
// 'Access-Control-Allow-Origin' HTTP header.
// Note: Passing in a []string{"*"} will allow any domain.
func AllowedOrigins(origins []string) CORSOption {
return func(ch *cors) error {
for _, v := range origins {
if v == corsOriginMatchAll {
ch.allowedOrigins = []string{corsOriginMatchAll}
return nil
}
}
ch.allowedOrigins = origins
return nil
}
}
// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the
// 'Access-Control-Allow-Origin' HTTP header.
func AllowedOriginValidator(fn OriginValidator) CORSOption {
return func(ch *cors) error {
ch.allowedOriginValidator = fn
return nil
}
}
// OptionStatusCode sets a custom status code on the OPTIONS requests.
// Default behaviour sets it to 200 to reflect best practices. This option is not mandatory
// and can be used if you need a custom status code (e.g. 204).
//
// More information on the spec:
// https://fetch.spec.whatwg.org/#cors-preflight-fetch
func OptionStatusCode(code int) CORSOption {
return func(ch *cors) error {
ch.optionStatusCode = code
return nil
}
}
// ExposedHeaders can be used to specify headers that are available
// and will not be stripped out by the user-agent.
func ExposedHeaders(headers []string) CORSOption {
return func(ch *cors) error {
ch.exposedHeaders = []string{}
for _, v := range headers {
normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if normalizedHeader == "" {
continue
}
if !ch.isMatch(normalizedHeader, ch.exposedHeaders) {
ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader)
}
}
return nil
}
}
// MaxAge determines the maximum age (in seconds) for which a preflight
// request may be cached. A maximum of 10 minutes is allowed. An age above
// this value will default to 10 minutes.
func MaxAge(age int) CORSOption {
return func(ch *cors) error {
// Maximum of 10 minutes.
if age > 600 {
age = 600
}
ch.maxAge = age
return nil
}
}
// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead
// passing them through to the next handler. This is useful when your application
// or framework has a pre-existing mechanism for responding to OPTIONS requests.
func IgnoreOptions() CORSOption {
return func(ch *cors) error {
ch.ignoreOptions = true
return nil
}
}
// AllowCredentials can be used to specify that the user agent may pass
// authentication details along with the request.
func AllowCredentials() CORSOption {
return func(ch *cors) error {
ch.allowCredentials = true
return nil
}
}
func (ch *cors) isOriginAllowed(origin string) bool {
if origin == "" {
return false
}
if ch.allowedOriginValidator != nil {
return ch.allowedOriginValidator(origin)
}
if len(ch.allowedOrigins) == 0 {
return true
}
for _, allowedOrigin := range ch.allowedOrigins {
if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
return true
}
}
return false
}
func (ch *cors) isMatch(needle string, haystack []string) bool {
for _, v := range haystack {
if v == needle {
return true
}
}
return false
}
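The functional options above compose into a single middleware value. Below is a minimal sketch of combining the less common options (`MaxAge`, `ExposedHeaders`, `AllowCredentials`, `OptionStatusCode`) with a router; the router variable and the origin string are placeholders chosen for illustration, not part of the vendored code:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

func main() {
	// Placeholder router for this sketch.
	apiRouter := mux.NewRouter()

	corsMw := handlers.CORS(
		// Echo only this explicit origin in Access-Control-Allow-Origin.
		handlers.AllowedOrigins([]string{"https://app.example.com"}),
		// Let browsers cache preflight responses for 10 minutes (the allowed maximum).
		handlers.MaxAge(600),
		// Make this custom response header readable by browser code.
		handlers.ExposedHeaders([]string{"X-Request-Id"}),
		// Permit cookies and Authorization headers on cross-origin requests.
		handlers.AllowCredentials(),
		// Answer preflight requests with 204 instead of the default 200.
		handlers.OptionStatusCode(http.StatusNoContent),
	)

	log.Fatal(http.ListenAndServe(":8081", corsMw(apiRouter)))
}
```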

9
vendor/github.com/gorilla/handlers/doc.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
/*
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's net/http package (or any framework supporting http.Handler).
The package includes handlers for logging in standardised formats, compressing
HTTP responses, validating content types and other useful tools for manipulating
requests and responses.
*/
package handlers

150
vendor/github.com/gorilla/handlers/handlers.go generated vendored Normal file
View File

@ -0,0 +1,150 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"bufio"
"fmt"
"net"
"net/http"
"sort"
"strings"
)
// MethodHandler is an http.Handler that dispatches to a handler whose key in the
// MethodHandler's map matches the name of the HTTP request's method, e.g. GET.
//
// If the request's method is OPTIONS and OPTIONS is not a key in the map then
// the handler responds with a status of 200 and sets the Allow header to a
// comma-separated list of available methods.
//
// If the request's method doesn't match any of its keys the handler responds
// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a
// comma-separated list of available methods.
type MethodHandler map[string]http.Handler
func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if handler, ok := h[req.Method]; ok {
handler.ServeHTTP(w, req)
} else {
allow := []string{}
for k := range h {
allow = append(allow, k)
}
sort.Strings(allow)
w.Header().Set("Allow", strings.Join(allow, ", "))
if req.Method == http.MethodOptions {
w.WriteHeader(http.StatusOK)
} else {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
}
// responseLogger is a wrapper of http.ResponseWriter that keeps track of its HTTP
// status code and body size.
type responseLogger struct {
w http.ResponseWriter
status int
size int
}
func (l *responseLogger) Write(b []byte) (int, error) {
size, err := l.w.Write(b)
l.size += size
return size, err
}
func (l *responseLogger) WriteHeader(s int) {
l.w.WriteHeader(s)
l.status = s
}
func (l *responseLogger) Status() int {
return l.status
}
func (l *responseLogger) Size() int {
return l.size
}
func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
conn, rw, err := l.w.(http.Hijacker).Hijack()
if err == nil && l.status == 0 {
// The status will be StatusSwitchingProtocols if there was no error and
// WriteHeader has not been called yet
l.status = http.StatusSwitchingProtocols
}
return conn, rw, err
}
// isContentType validates the Content-Type header matches the supplied
// contentType. That is, its type and subtype match.
func isContentType(h http.Header, contentType string) bool {
ct := h.Get("Content-Type")
if i := strings.IndexRune(ct, ';'); i != -1 {
ct = ct[0:i]
}
return ct == contentType
}
// ContentTypeHandler wraps and returns an http.Handler, validating that the request
// content type is compatible with the contentTypes list. It writes an HTTP 415
// error if that fails.
//
// Only PUT, POST, and PATCH requests are considered.
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) {
h.ServeHTTP(w, r)
return
}
for _, ct := range contentTypes {
if isContentType(r.Header, ct) {
h.ServeHTTP(w, r)
return
}
}
http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q",
r.Header.Get("Content-Type"),
contentTypes),
http.StatusUnsupportedMediaType)
})
}
const (
// HTTPMethodOverrideHeader is a commonly used
// http header to override a request method.
HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
// HTTPMethodOverrideFormKey is a commonly used
// HTML form key to override a request method.
HTTPMethodOverrideFormKey = "_method"
)
// HTTPMethodOverrideHandler wraps and returns an http.Handler which checks for
// the X-HTTP-Method-Override header or the _method form key, and overrides (if
// valid) request.Method with its value.
//
// This is especially useful for HTTP clients that don't support many http verbs.
// It isn't secure to override e.g. a GET to a POST, so only POST requests are
// considered. Likewise, the override method can only be a "write" method: PUT,
// PATCH or DELETE.
//
// Form method takes precedence over header method.
func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost {
om := r.FormValue(HTTPMethodOverrideFormKey)
if om == "" {
om = r.Header.Get(HTTPMethodOverrideHeader)
}
if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete {
r.Method = om
}
}
h.ServeHTTP(w, r)
})
}
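MethodHandler, ContentTypeHandler and HTTPMethodOverrideHandler are designed to nest. A minimal sketch of composing the three follows; the `listTasks`/`createTask` handlers and the `/tasks` path are placeholders for illustration, not part of this package:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func listTasks(w http.ResponseWriter, r *http.Request)  { w.Write([]byte("list")) }
func createTask(w http.ResponseWriter, r *http.Request) { w.Write([]byte("create")) }

func main() {
	// Dispatch /tasks by HTTP method; unknown methods get 405 plus an Allow header.
	tasks := handlers.MethodHandler{
		http.MethodGet:  http.HandlerFunc(listTasks),
		http.MethodPost: http.HandlerFunc(createTask),
	}

	serveMux := http.NewServeMux()
	// Reject PUT/POST/PATCH bodies that are not application/json with 415, and
	// let POST requests override their method via X-HTTP-Method-Override first.
	serveMux.Handle("/tasks", handlers.HTTPMethodOverrideHandler(
		handlers.ContentTypeHandler(tasks, "application/json"),
	))

	log.Fatal(http.ListenAndServe(":8081", serveMux))
}
```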

246
vendor/github.com/gorilla/handlers/logging.go generated vendored Normal file
View File

@ -0,0 +1,246 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"io"
"net"
"net/http"
"net/url"
"strconv"
"time"
"unicode/utf8"
"github.com/felixge/httpsnoop"
)
// Logging
// LogFormatterParams is the structure any formatter will be handed when time to log comes.
type LogFormatterParams struct {
Request *http.Request
URL url.URL
TimeStamp time.Time
StatusCode int
Size int
}
// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler.
type LogFormatter func(writer io.Writer, params LogFormatterParams)
// loggingHandler is the http.Handler implementation for LoggingHandler and its
// friends
type loggingHandler struct {
writer io.Writer
handler http.Handler
formatter LogFormatter
}
func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
t := time.Now()
logger, w := makeLogger(w)
url := *req.URL
h.handler.ServeHTTP(w, req)
if req.MultipartForm != nil {
err := req.MultipartForm.RemoveAll()
if err != nil {
return
}
}
params := LogFormatterParams{
Request: req,
URL: url,
TimeStamp: t,
StatusCode: logger.Status(),
Size: logger.Size(),
}
h.formatter(h.writer, params)
}
func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) {
logger := &responseLogger{w: w, status: http.StatusOK}
return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{
Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
return logger.Write
},
WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
return logger.WriteHeader
},
})
}
const lowerhex = "0123456789abcdef"
func appendQuoted(buf []byte, s string) []byte {
var runeTmp [utf8.UTFMax]byte
for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0 and is reassigned as 1
r := rune(s[0])
width = 1
if r >= utf8.RuneSelf {
r, width = utf8.DecodeRuneInString(s)
}
if width == 1 && r == utf8.RuneError {
buf = append(buf, `\x`...)
buf = append(buf, lowerhex[s[0]>>4])
buf = append(buf, lowerhex[s[0]&0xF])
continue
}
if r == rune('"') || r == '\\' { // always backslashed
buf = append(buf, '\\')
buf = append(buf, byte(r))
continue
}
if strconv.IsPrint(r) {
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
continue
}
switch r {
case '\a':
buf = append(buf, `\a`...)
case '\b':
buf = append(buf, `\b`...)
case '\f':
buf = append(buf, `\f`...)
case '\n':
buf = append(buf, `\n`...)
case '\r':
buf = append(buf, `\r`...)
case '\t':
buf = append(buf, `\t`...)
case '\v':
buf = append(buf, `\v`...)
default:
switch {
case r < ' ':
buf = append(buf, `\x`...)
buf = append(buf, lowerhex[s[0]>>4])
buf = append(buf, lowerhex[s[0]&0xF])
case r > utf8.MaxRune:
r = 0xFFFD
fallthrough
case r < 0x10000:
buf = append(buf, `\u`...)
for s := 12; s >= 0; s -= 4 {
buf = append(buf, lowerhex[r>>uint(s)&0xF])
}
default:
buf = append(buf, `\U`...)
for s := 28; s >= 0; s -= 4 {
buf = append(buf, lowerhex[r>>uint(s)&0xF])
}
}
}
}
return buf
}
// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
username := "-"
if url.User != nil {
if name := url.User.Username(); name != "" {
username = name
}
}
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
host = req.RemoteAddr
}
uri := req.RequestURI
// Requests using the CONNECT method over HTTP/2.0 must use
// the authority field (aka r.Host) to identify the target.
// Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
if req.ProtoMajor == 2 && req.Method == "CONNECT" {
uri = req.Host
}
if uri == "" {
uri = url.RequestURI()
}
buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
buf = append(buf, host...)
buf = append(buf, " - "...)
buf = append(buf, username...)
buf = append(buf, " ["...)
buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
buf = append(buf, `] "`...)
buf = append(buf, req.Method...)
buf = append(buf, " "...)
buf = appendQuoted(buf, uri)
buf = append(buf, " "...)
buf = append(buf, req.Proto...)
buf = append(buf, `" `...)
buf = append(buf, strconv.Itoa(status)...)
buf = append(buf, " "...)
buf = append(buf, strconv.Itoa(size)...)
return buf
}
// writeLog writes a log entry for req to w in Apache Common Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func writeLog(writer io.Writer, params LogFormatterParams) {
buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
buf = append(buf, '\n')
_, _ = writer.Write(buf)
}
// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
buf = append(buf, ` "`...)
buf = appendQuoted(buf, params.Request.Referer())
buf = append(buf, `" "`...)
buf = appendQuoted(buf, params.Request.UserAgent())
buf = append(buf, '"', '\n')
_, _ = writer.Write(buf)
}
// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
// Apache Combined Log Format.
//
// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
//
// LoggingHandler always sets the ident field of the log to -.
func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
return loggingHandler{out, h, writeCombinedLog}
}
// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
// Apache Common Log Format (CLF).
//
// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
//
// LoggingHandler always sets the ident field of the log to -
//
// Example:
//
// r := mux.NewRouter()
// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// w.Write([]byte("This is a catch-all route"))
// })
// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
// http.ListenAndServe(":1123", loggedRouter)
func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
return loggingHandler{out, h, writeLog}
}
// CustomLoggingHandler provides a way to supply a custom log formatter
// while taking advantage of the mechanisms in this package.
func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler {
return loggingHandler{out, h, f}
}
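CustomLoggingHandler accepts any function with the LogFormatter signature above. A minimal sketch with an illustrative one-line format (the format itself is an assumption, not something the package prescribes):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

// briefFormatter writes one compact line per request using the fields
// provided in LogFormatterParams.
func briefFormatter(w io.Writer, params handlers.LogFormatterParams) {
	fmt.Fprintf(w, "%s %s %s -> %d (%d bytes)\n",
		params.TimeStamp.Format("2006-01-02T15:04:05Z07:00"),
		params.Request.Method,
		params.URL.Path,
		params.StatusCode,
		params.Size,
	)
}

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Wrap the handler so every request is logged with briefFormatter.
	logged := handlers.CustomLoggingHandler(os.Stdout, handler, briefFormatter)
	log.Fatal(http.ListenAndServe(":8081", logged))
}
```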

120
vendor/github.com/gorilla/handlers/proxy_headers.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
package handlers
import (
"net/http"
"regexp"
"strings"
)
var (
// De-facto standard header keys.
xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
xRealIP = http.CanonicalHeaderKey("X-Real-IP")
)
var (
// RFC7239 defines a new "Forwarded: " header designed to replace the
// existing use of X-Forwarded-* headers.
// e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
forwarded = http.CanonicalHeaderKey("Forwarded")
// Allows for a sub-match of the first value after 'for=' to the next
// comma, semi-colon or space. The match is case-insensitive.
forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
// Allows for a sub-match for the first instance of scheme (http|https)
// prefixed by 'proto='. The match is case-insensitive.
protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`)
)
// ProxyHeaders inspects common reverse proxy headers and sets the corresponding
// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP
// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme
// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239
// Forwarded header, which may include both client IPs and schemes.
//
// NOTE: This middleware should only be used when behind a reverse
// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are
// configured not to) strip these headers from client requests, or where these
// headers are accepted "as is" from a remote client (e.g. when Go is not behind
// a proxy), can manifest as a vulnerability if your application uses these
// headers for validating the 'trustworthiness' of a request.
func ProxyHeaders(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
// Set the remote IP with the value passed from the proxy.
if fwd := getIP(r); fwd != "" {
r.RemoteAddr = fwd
}
// Set the scheme (proto) with the value passed from the proxy.
if scheme := getScheme(r); scheme != "" {
r.URL.Scheme = scheme
}
// Set the host with the value passed by the proxy
if r.Header.Get(xForwardedHost) != "" {
r.Host = r.Header.Get(xForwardedHost)
}
// Call the next handler in the chain.
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order).
func getIP(r *http.Request) string {
var addr string
switch {
case r.Header.Get(xForwardedFor) != "":
fwd := r.Header.Get(xForwardedFor)
// Only grab the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first may represent forwarding proxies earlier in the chain.
s := strings.Index(fwd, ", ")
if s == -1 {
s = len(fwd)
}
addr = fwd[:s]
case r.Header.Get(xRealIP) != "":
addr = r.Header.Get(xRealIP)
case r.Header.Get(forwarded) != "":
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
addr = strings.Trim(match[1], `"`)
}
}
return addr
}
// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
// Forwarded headers (in that order).
func getScheme(r *http.Request) string {
var scheme string
// Retrieve the scheme from X-Forwarded-Proto.
if proto := r.Header.Get(xForwardedProto); proto != "" {
scheme = strings.ToLower(proto)
} else if proto = r.Header.Get(xForwardedScheme); proto != "" {
scheme = strings.ToLower(proto)
} else if proto = r.Header.Get(forwarded); proto != "" {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'proto=' capture, which we ignore. In the case of multiple proto
// parameters (invalid) we only extract the first.
if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 {
scheme = strings.ToLower(match[1])
}
}
return scheme
}
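Since ProxyHeaders trusts the forwarded headers unconditionally, it should only wrap servers that are reachable exclusively through a trusted reverse proxy. A minimal usage sketch, combined with LoggingHandler so the access log records the forwarded client address rather than the proxy's:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// After ProxyHeaders, RemoteAddr reflects X-Forwarded-For / X-Real-IP /
		// Forwarded when the proxy sets them.
		fmt.Fprintf(w, "client: %s, scheme: %s\n", r.RemoteAddr, r.URL.Scheme)
	})

	// Only safe when this server sits behind a reverse proxy that strips
	// these headers from client requests.
	handler := handlers.LoggingHandler(os.Stdout, handlers.ProxyHeaders(app))

	log.Fatal(http.ListenAndServe(":8081", handler))
}
```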

98
vendor/github.com/gorilla/handlers/recovery.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package handlers
import (
"log"
"net/http"
"runtime/debug"
)
// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
type RecoveryHandlerLogger interface {
Println(...interface{})
}
type recoveryHandler struct {
handler http.Handler
logger RecoveryHandlerLogger
printStack bool
}
// RecoveryOption provides a functional approach to define
// configuration for a handler, such as setting the logger or
// whether or not to print stack traces on panic.
type RecoveryOption func(http.Handler)
func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
for _, option := range opts {
option(h)
}
return h
}
// RecoveryHandler is HTTP middleware that recovers from a panic,
// logs the panic, writes http.StatusInternalServerError, and
// continues to the next handler.
//
// Example:
//
// r := mux.NewRouter()
// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// panic("Unexpected error!")
// })
//
// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
r := &recoveryHandler{handler: h}
return parseRecoveryOptions(r, opts...)
}
}
// RecoveryLogger is a functional option to override
// the default logger.
func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler) //nolint:errcheck //TODO:
// @bharat-rajani should return type-assertion error but would break the API?
r.logger = logger
}
}
// PrintRecoveryStack is a functional option to enable
// or disable printing stack traces on panic.
func PrintRecoveryStack(shouldPrint bool) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler) //nolint:errcheck //TODO:
// @bharat-rajani should return type-assertion error but would break the API?
r.printStack = shouldPrint
}
}
func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
defer func() {
if err := recover(); err != nil {
w.WriteHeader(http.StatusInternalServerError)
h.log(err)
}
}()
h.handler.ServeHTTP(w, req)
}
func (h recoveryHandler) log(v ...interface{}) {
if h.logger != nil {
h.logger.Println(v...)
} else {
log.Println(v...)
}
if h.printStack {
stack := string(debug.Stack())
if h.logger != nil {
h.logger.Println(stack)
} else {
log.Println(stack)
}
}
}
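The recovery options combine when constructing the middleware. A minimal sketch, with a dedicated panic logger assumed purely for illustration:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	// Dedicated logger for panics (an illustrative choice).
	panicLog := log.New(os.Stderr, "panic: ", log.LstdFlags)

	recovery := handlers.RecoveryHandler(
		handlers.RecoveryLogger(panicLog),
		handlers.PrintRecoveryStack(true), // also log the stack trace
	)

	boom := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		panic("something went wrong")
	})

	// Panics become 500 responses instead of crashing the server.
	log.Fatal(http.ListenAndServe(":8081", recovery(boom)))
}
```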

20
vendor/github.com/gorilla/mux/.editorconfig generated vendored Normal file
View File

@ -0,0 +1,20 @@
; https://editorconfig.org/
root = true
[*]
insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
indent_style = space
indent_size = 2
[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
indent_style = tab
indent_size = 4
[*.md]
indent_size = 4
trim_trailing_whitespace = false
eclint_indent_style = unset

1
vendor/github.com/gorilla/mux/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
coverage.coverprofile

27
vendor/github.com/gorilla/mux/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

34
vendor/github.com/gorilla/mux/Makefile generated vendored Normal file
View File

@ -0,0 +1,34 @@
GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
GO_SEC=$(shell which gosec 2> /dev/null || echo '')
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
.PHONY: golangci-lint
golangci-lint:
$(if $(GO_LINT), ,go install $(GO_LINT_URI))
@echo "##### Running golangci-lint"
golangci-lint run -v
.PHONY: gosec
gosec:
$(if $(GO_SEC), ,go install $(GO_SEC_URI))
@echo "##### Running gosec"
gosec ./...
.PHONY: govulncheck
govulncheck:
$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
@echo "##### Running govulncheck"
govulncheck ./...
.PHONY: verify
verify: golangci-lint gosec govulncheck
.PHONY: test
test:
@echo "##### Running tests"
go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...

812
vendor/github.com/gorilla/mux/README.md generated vendored Normal file
View File

@ -0,0 +1,812 @@
# gorilla/mux
![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg)
[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux)
[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
---
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
* [Static Files](#static-files)
* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
* [Registered URLs](#registered-urls)
* [Walking Routes](#walking-routes)
* [Graceful Shutdown](#graceful-shutdown)
* [Middleware](#middleware)
* [Handling CORS Requests](#handling-cors-requests)
* [Testing Handlers](#testing-handlers)
* [Full Example](#full-example)
---
## Install
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
```sh
go get -u github.com/gorilla/mux
```
## Examples
Let's start registering a couple of URL paths and handlers:
```go
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
```
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
And this is all you need to know about the basic usage. More advanced options are explained below.
### Matching Routes
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
```go
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.example.com")
```
There are several other matchers that can be added. To match path prefixes:
```go
r.PathPrefix("/products/")
```
...or HTTP methods:
```go
r.Methods("GET", "POST")
```
...or URL schemes:
```go
r.Schemes("https")
```
...or header values:
```go
r.Headers("X-Requested-With", "XMLHttpRequest")
```
...or query values:
```go
r.Queries("key", "value")
```
...or to use a custom matcher function:
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
```
...and finally, it is possible to combine several matchers in a single route:
```go
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
```
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
```go
r := mux.NewRouter()
r.HandleFunc("/specific", specificHandler)
r.PathPrefix("/").Handler(catchAllHandler)
```
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```
Then register routes in the subrouter:
```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
request that matches "/static/\*". This makes it easy to serve static files with mux:
```go
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Serving Single Page Applications
Most of the time it makes sense to serve your SPA on a separate web server from your API,
but sometimes it's desirable to serve them both from one place. It's possible to write a simple
handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
mux's powerful routing for your API endpoints.
```go
package main
import (
"encoding/json"
"log"
"net/http"
"os"
"path/filepath"
"time"
"github.com/gorilla/mux"
)
// spaHandler implements the http.Handler interface, so we can use it
// to respond to HTTP requests. The path to the static directory and
// path to the index file within that static directory are used to
// serve the SPA in the given static directory.
type spaHandler struct {
staticPath string
indexPath string
}
// ServeHTTP inspects the URL path to locate a file within the static dir
// on the SPA handler. If a file is found, it will be served. If not, the
// file located at the index path on the SPA handler will be served. This
// is suitable behavior for serving an SPA (single page application).
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Join internally calls path.Clean to prevent directory traversal
path := filepath.Join(h.staticPath, r.URL.Path)
// check whether a file exists or is a directory at the given path
fi, err := os.Stat(path)
if os.IsNotExist(err) {
// file does not exist, serve index.html
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
return
}
if err != nil {
// if we got an error (that wasn't that the file doesn't exist) stating the
// file, return a 500 internal server error and stop
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if fi.IsDir() {
// path is a directory, serve index.html
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
return
}
// otherwise, use http.FileServer to serve the static file
http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
}
func main() {
router := mux.NewRouter()
router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
// an example API handler
json.NewEncoder(w).Encode(map[string]bool{"ok": true})
})
spa := spaHandler{staticPath: "build", indexPath: "index.html"}
router.PathPrefix("/").Handler(spa)
srv := &http.Server{
Handler: router,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Registered URLs
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
```
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
```go
url, err := r.Get("article").URL("category", "technology", "id", "42")
```
...and the result will be a `url.URL` with the following path:
```
"/articles/technology/42"
```
This also works for host and query value variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.example.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
Regex support also exists for matching Headers within a route. For example, we could do:
```go
r.HeadersRegexp("Content-Type", "application/(text|json)")
```
...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go
// "http://news.example.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
```
And if you use subrouters, host and path defined separately can be built as well:
```go
r := mux.NewRouter()
s := r.Host("{subdomain}.example.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.example.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
```
To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
```go
r := mux.NewRouter()
r.Host("{domain}").
Path("/{group}/{item_id}").
Queries("some_data1", "{some_data1}").
Queries("some_data2", "{some_data2}").
Name("article")
// Will print [domain group item_id some_data1 some_data2] <nil>
fmt.Println(r.Get("article").GetVarNames())
```
### Walking Routes
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
the following prints all of the registered routes:
```go
package main
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
)
func handler(w http.ResponseWriter, r *http.Request) {
return
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.HandleFunc("/products", handler).Methods("POST")
r.HandleFunc("/articles", handler).Methods("GET")
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
pathTemplate, err := route.GetPathTemplate()
if err == nil {
fmt.Println("ROUTE:", pathTemplate)
}
pathRegexp, err := route.GetPathRegexp()
if err == nil {
fmt.Println("Path regexp:", pathRegexp)
}
queriesTemplates, err := route.GetQueriesTemplates()
if err == nil {
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
}
queriesRegexps, err := route.GetQueriesRegexp()
if err == nil {
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
}
methods, err := route.GetMethods()
if err == nil {
fmt.Println("Methods:", strings.Join(methods, ","))
}
fmt.Println()
return nil
})
if err != nil {
fmt.Println(err)
}
http.Handle("/", r)
}
```
### Graceful Shutdown
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
```go
package main
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
)
func main() {
var wait time.Duration
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
flag.Parse()
r := mux.NewRouter()
// Add your routes as needed
srv := &http.Server{
Addr: "0.0.0.0:8080",
// Good practice to set timeouts to avoid Slowloris attacks.
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: r, // Pass our instance of gorilla/mux in.
}
// Run our server in a goroutine so that it doesn't block.
go func() {
if err := srv.ListenAndServe(); err != nil {
log.Println(err)
}
}()
c := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(c, os.Interrupt)
// Block until we receive our signal.
<-c
// Create a deadline to wait for.
ctx, cancel := context.WithTimeout(context.Background(), wait)
defer cancel()
// Doesn't block if no connections, but will otherwise wait
// until the timeout deadline.
srv.Shutdown(ctx)
// Optionally, you could run srv.Shutdown in a goroutine and block on
// <-ctx.Done() if your application should wait for other services
// to finalize based on context cancellation.
log.Println("shutting down")
os.Exit(0)
}
```
### Middleware
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
Mux middlewares are defined using the de facto standard type:
```go
type MiddlewareFunc func(http.Handler) http.Handler
```
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
A very basic middleware which logs the URI of the request being handled could be written as:
```go
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
```
Middlewares can be added to a router using `Router.Use()`:
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(loggingMiddleware)
```
A more complex authentication middleware, which maps session token to users, could be written as:
```go
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
// Pass down the request to the next middleware (or final handler)
next.ServeHTTP(w, r)
} else {
// Write an error and stop the handler chain
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
```
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)
```
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
### Handling CORS Requests
[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
* If you do not specify any methods, then:
> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
```go
package main
import (
"net/http"
"github.com/gorilla/mux"
)
func main() {
r := mux.NewRouter()
// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
r.Use(mux.CORSMethodMiddleware(r))
http.ListenAndServe(":8080", r)
}
func fooHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
return
}
w.Write([]byte("foo"))
}
```
And a request to `/foo` using something like:
```bash
curl localhost:8080/foo -v
```
Would look like:
```bash
* Trying ::1...
* TCP_NODELAY set
* Connected to localhost (::1) port 8080 (#0)
> GET /foo HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.59.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
< Access-Control-Allow-Origin: *
< Date: Fri, 28 Jun 2019 20:13:30 GMT
< Content-Length: 3
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
foo
```
### Testing Handlers
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
First, our simple HTTP handler:
```go
// endpoints.go
package main
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
// A very simple health check.
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// In the future we could report back on the status of our DB, or our cache
// (e.g. Redis) by performing a simple PING, and include them in the response.
io.WriteString(w, `{"alive": true}`)
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/health", HealthCheckHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test code:
```go
// endpoints_test.go
package main
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestHealthCheckHandler(t *testing.T) {
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
// pass 'nil' as the third parameter.
req, err := http.NewRequest("GET", "/health", nil)
if err != nil {
t.Fatal(err)
}
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
rr := httptest.NewRecorder()
handler := http.HandlerFunc(HealthCheckHandler)
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
// directly and pass in our Request and ResponseRecorder.
handler.ServeHTTP(rr, req)
// Check the status code is what we expect.
if status := rr.Code; status != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v",
status, http.StatusOK)
}
// Check the response body is what we expect.
expected := `{"alive": true}`
if rr.Body.String() != expected {
t.Errorf("handler returned unexpected body: got %v want %v",
rr.Body.String(), expected)
}
}
```
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
possible route variables as needed.
```go
// endpoints.go
func main() {
r := mux.NewRouter()
// A route with a route variable:
r.HandleFunc("/metrics/{type}", MetricsHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test file, with a table-driven test of `routeVariables`:
```go
// endpoints_test.go
func TestMetricsHandler(t *testing.T) {
tt := []struct{
routeVariable string
shouldPass bool
}{
{"goroutines", true},
{"heap", true},
{"counters", true},
{"queries", true},
{"adhadaeqm3k", false},
}
for _, tc := range tt {
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
req, err := http.NewRequest("GET", path, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
// To add the vars to the context,
// we need to create a router through which we can pass the request.
router := mux.NewRouter()
router.HandleFunc("/metrics/{type}", MetricsHandler)
router.ServeHTTP(rr, req)
// In this case, our MetricsHandler returns a non-200 response
// for a route variable it doesn't know about.
if rr.Code == http.StatusOK && !tc.shouldPass {
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
tc.routeVariable, rr.Code, http.StatusOK)
}
}
}
```
## Full Example
Here's a complete, runnable example of a small `mux` based server:
```go
package main
import (
"net/http"
"log"
"github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Gorilla!\n"))
}
func main() {
r := mux.NewRouter()
// Routes consist of a path and a handler function.
r.HandleFunc("/", YourHandler)
// Bind to a port and pass our router in
log.Fatal(http.ListenAndServe(":8000", r))
}
```
## License
BSD licensed. See the LICENSE file for details.

305
vendor/github.com/gorilla/mux/doc.go generated vendored Normal file
View File

@ -0,0 +1,305 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package mux implements a request router and dispatcher.
The name mux stands for "HTTP request multiplexer". Like the standard
http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
- Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
- URL hosts, paths and query values can have variables with an optional
regular expression.
- Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
- Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
- It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming request URL matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
The names are used to create a map of route variables which can be retrieved
calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
Note that if any capturing groups are present, mux will panic() during parsing. To prevent
this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
when capturing groups were present.
And this is all you need to know about the basic usage. More advanced options
are explained below.
Routes can also be restricted to a domain or subdomain. Just define a host
pattern to be matched. They can also have variables:
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
There are several other matchers that can be added. To match path prefixes:
r.PathPrefix("/products/")
...or HTTP methods:
r.Methods("GET", "POST")
...or URL schemes:
r.Schemes("https")
...or header values:
r.Headers("X-Requested-With", "XMLHttpRequest")
...or query values:
r.Queries("key", "value")
...or to use a custom matcher function:
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
...and finally, it is possible to combine several matchers in a single route:
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".
For example, let's say we have several URLs that should only match when the
host is "www.example.com". Create a route for that host and get a "subrouter"
from it:
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
Then register routes in the subrouter:
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
The three URL paths we registered above will only be tested if the domain is
"www.example.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then parts of the app can register their
paths relative to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as base for their paths:
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
Note that the path provided to PathPrefix() represents a "wildcard": calling
PathPrefix("/static/").Handler(...) means that the handler will be passed any
request that matches "/static/*". This makes it easy to serve static files with mux:
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name calling Name() on a route. For example:
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:
url, err := r.Get("article").URL("category", "technology", "id", "42")
...and the result will be a url.URL with the following path:
"/articles/technology/42"
This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.
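For example, a minimal sketch of a build-only route (the host, path and name here
are placeholders):
r := mux.NewRouter()
r.Host("{subdomain}.example.com").
Path("/signup").
Name("signup").
BuildOnly()
Such a route never matches a request, but it can still be used with URL(),
URLHost() or URLPath().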
Regex support also exists for matching Headers within a route. For example, we could do:
r.HeadersRegexp("Content-Type", "application/(text|json)")
...and the route will match requests with a Content-Type of either
`application/json` or `application/text`.
There's also a way to build only the URL host or path for a route:
use the methods URLHost() or URLPath() instead. For the previous route,
we would do:
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
And if you use subrouters, host and path defined separately can be built
as well:
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
type MiddlewareFunc func(http.Handler) http.Handler
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
A very basic middleware which logs the URI of the request being handled could be written as:
func simpleMw(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
Middlewares can be added to a router using `Router.Use()`:
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(simpleMw)
A more complex authentication middleware, which maps session token to users, could be written as:
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
next.ServeHTTP(w, r)
} else {
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
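For example, a minimal sketch of a middleware that rejects requests lacking a
(hypothetical) X-API-Key header and aborts the chain by returning early:
func requireAPIKey(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-API-Key") == "" {
			// next.ServeHTTP is never called, so the chain stops here.
			http.Error(w, "Forbidden", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}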
*/
package mux

74
vendor/github.com/gorilla/mux/middleware.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package mux
import (
"net/http"
"strings"
)
// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
type MiddlewareFunc func(http.Handler) http.Handler
// middleware interface is anything which implements a MiddlewareFunc named Middleware.
type middleware interface {
Middleware(handler http.Handler) http.Handler
}
// Middleware allows MiddlewareFunc to implement the middleware interface.
func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
return mw(handler)
}
// Use appends a MiddlewareFunc to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order in which they are applied to the Router.
func (r *Router) Use(mwf ...MiddlewareFunc) {
for _, fn := range mwf {
r.middlewares = append(r.middlewares, fn)
}
}
// useInterface appends a middleware to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order in which they are applied to the Router.
func (r *Router) useInterface(mw middleware) {
r.middlewares = append(r.middlewares, mw)
}
// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
// on requests for routes that have an OPTIONS method matcher to all the method matchers on
// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
// by the middleware. See examples for usage.
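// For instance (an illustrative sketch; fooHandler is a placeholder):
//
// r := mux.NewRouter()
// r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodOptions)
// r.Use(CORSMethodMiddleware(r))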
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
allMethods, err := getAllMethodsForRoute(r, req)
if err == nil {
for _, v := range allMethods {
if v == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
}
}
}
next.ServeHTTP(w, req)
})
}
}
// getAllMethodsForRoute returns all the methods from method matchers matching a given
// request.
func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
var allMethods []string
for _, route := range r.routes {
var match RouteMatch
if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
methods, err := route.GetMethods()
if err != nil {
return nil, err
}
allMethods = append(allMethods, methods...)
}
}
return allMethods, nil
}

608
vendor/github.com/gorilla/mux/mux.go generated vendored Normal file
View File

@ -0,0 +1,608 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"context"
"errors"
"fmt"
"net/http"
"path"
"regexp"
)
var (
// ErrMethodMismatch is returned when the method in the request does not match
// the method defined against the route.
ErrMethodMismatch = errors.New("method is not allowed")
// ErrNotFound is returned when no route match is found.
ErrNotFound = errors.New("no matching route was found")
)
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route)}
}
// Router registers routes to be matched and dispatches a handler.
//
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
//
// func main() {
// http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in a init() function:
//
// func init() {
// http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
// This can be used to render your own 404 Not Found errors.
NotFoundHandler http.Handler
// Configurable Handler to be used when the request method does not match the route.
// This can be used to render your own 405 Method Not Allowed errors.
MethodNotAllowedHandler http.Handler
// Routes to be matched, in order.
routes []*Route
// Routes by name for URL building.
namedRoutes map[string]*Route
// If true, do not clear the request context after handling the request.
//
// Deprecated: No effect, since the context is stored on the request itself.
KeepContext bool
// Slice of middlewares to be called after a match is found
middlewares []middleware
// configuration shared with `Route`
routeConf
}
// common route configuration shared between `Router` and `Route`
type routeConf struct {
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, when the path pattern is "/path//to", accessing "/path//to"
// will not redirect
skipClean bool
// Manager for the variables from host and path.
regexp routeRegexpGroup
// List of matchers.
matchers []matcher
// The scheme used when building URLs.
buildScheme string
buildVarsFunc BuildVarsFunc
}
// returns an effective deep copy of `routeConf`
func copyRouteConf(r routeConf) routeConf {
c := r
if r.regexp.path != nil {
c.regexp.path = copyRouteRegexp(r.regexp.path)
}
if r.regexp.host != nil {
c.regexp.host = copyRouteRegexp(r.regexp.host)
}
c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
for _, q := range r.regexp.queries {
c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
}
c.matchers = make([]matcher, len(r.matchers))
copy(c.matchers, r.matchers)
return c
}
func copyRouteRegexp(r *routeRegexp) *routeRegexp {
c := *r
return &c
}
// Match attempts to match the given request against the router's registered routes.
//
// If the request matches a route of this router or one of its subrouters the Route,
// Handler, and Vars fields of the match argument are filled and this function
// returns true.
//
// If the request does not match any of this router's or its subrouters' routes
// then this function returns false. If available, a reason for the match failure
// will be filled in the match argument's MatchErr field. If the match failure type
// (eg: not found) has a registered handler, the handler is assigned to the Handler
// field of the match argument.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
// Build middleware chain if no error was found
if match.MatchErr == nil {
for i := len(r.middlewares) - 1; i >= 0; i-- {
match.Handler = r.middlewares[i].Middleware(match.Handler)
}
}
return true
}
}
if match.MatchErr == ErrMethodMismatch {
if r.MethodNotAllowedHandler != nil {
match.Handler = r.MethodNotAllowedHandler
return true
}
return false
}
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
match.MatchErr = ErrNotFound
return true
}
match.MatchErr = ErrNotFound
return false
}
// ServeHTTP dispatches the handler registered in the matched route.
//
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.skipClean {
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Clean path to canonical form and redirect.
if p := cleanPath(path); p != path {
// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
// http://code.google.com/p/go/issues/detail?id=5252
url := *req.URL
url.Path = p
p = url.String()
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
}
var match RouteMatch
var handler http.Handler
if r.Match(req, &match) {
handler = match.Handler
req = requestWithVars(req, match.Vars)
req = requestWithRoute(req, match.Route)
}
if handler == nil && match.MatchErr == ErrMethodMismatch {
handler = methodNotAllowedHandler()
}
if handler == nil {
handler = http.NotFoundHandler()
}
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
return r.namedRoutes[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
return r.namedRoutes[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
// request will be made as a GET by most clients. Use middleware or client settings
// to modify this behaviour as needed.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
// route inherit the original StrictSlash setting.
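// For example (an illustrative sketch; handler is a placeholder):
//
// r := mux.NewRouter().StrictSlash(true)
// // a request for "/path" is redirected to "/path/"
// r.HandleFunc("/path/", handler)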
func (r *Router) StrictSlash(value bool) *Router {
r.strictSlash = value
return r
}
// SkipClean defines the path cleaning behaviour for new routes. The initial
// value is false. Users should be careful about which routes are not cleaned.
//
// When true, if the route path is "/path//to", it will remain with the double
// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
//
// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
// become /fetch/http/xkcd.com/534
func (r *Router) SkipClean(value bool) *Router {
r.skipClean = value
return r
}
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
//
// If not called, the router will match the unencoded path to the routes.
// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
func (r *Router) UseEncodedPath() *Router {
r.useEncodedPath = true
return r
}
// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
// initialize a route with a copy of the parent router's configuration
route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.routes = append(r.routes, route)
return route
}
// Name registers a new route with a name.
// See Route.Name().
func (r *Router) Name(name string) *Route {
return r.NewRoute().Name(name)
}
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
return r.NewRoute().Path(path).Handler(handler)
}
// HandleFunc registers a new route with a matcher for the URL path.
// See Route.Path() and Route.HandlerFunc().
func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
*http.Request)) *Route {
return r.NewRoute().Path(path).HandlerFunc(f)
}
// Headers registers a new route with a matcher for request header values.
// See Route.Headers().
func (r *Router) Headers(pairs ...string) *Route {
return r.NewRoute().Headers(pairs...)
}
// Host registers a new route with a matcher for the URL host.
// See Route.Host().
func (r *Router) Host(tpl string) *Route {
return r.NewRoute().Host(tpl)
}
// MatcherFunc registers a new route with a custom matcher function.
// See Route.MatcherFunc().
func (r *Router) MatcherFunc(f MatcherFunc) *Route {
return r.NewRoute().MatcherFunc(f)
}
// Methods registers a new route with a matcher for HTTP methods.
// See Route.Methods().
func (r *Router) Methods(methods ...string) *Route {
return r.NewRoute().Methods(methods...)
}
// Path registers a new route with a matcher for the URL path.
// See Route.Path().
func (r *Router) Path(tpl string) *Route {
return r.NewRoute().Path(tpl)
}
// PathPrefix registers a new route with a matcher for the URL path prefix.
// See Route.PathPrefix().
func (r *Router) PathPrefix(tpl string) *Route {
return r.NewRoute().PathPrefix(tpl)
}
// Queries registers a new route with a matcher for URL query values.
// See Route.Queries().
func (r *Router) Queries(pairs ...string) *Route {
return r.NewRoute().Queries(pairs...)
}
// Schemes registers a new route with a matcher for URL schemes.
// See Route.Schemes().
func (r *Router) Schemes(schemes ...string) *Route {
return r.NewRoute().Schemes(schemes...)
}
// BuildVarsFunc registers a new route with a custom function for modifying
// route variables before building a URL.
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
return r.NewRoute().BuildVarsFunc(f)
}
// Walk walks the router and all its sub-routers, calling walkFn for each route
// in the tree. The routes are walked in the order they were added. Sub-routers
// are explored depth-first.
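// For example, a sketch that prints the path template of every registered route
// (routes without a path template are skipped):
//
// _ = r.Walk(func(route *Route, router *Router, ancestors []*Route) error {
// 	if tpl, err := route.GetPathTemplate(); err == nil {
// 		fmt.Println(tpl)
// 	}
// 	return nil
// })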
func (r *Router) Walk(walkFn WalkFunc) error {
return r.walk(walkFn, []*Route{})
}
// SkipRouter is used as a return value from WalkFuncs to indicate that the
// router that walk is about to descend down to should be skipped.
var SkipRouter = errors.New("skip this router")
// WalkFunc is the type of the function called for each route visited by Walk.
// At every invocation, it is given the current route, the current router,
// and a list of ancestor routes that lead to the current route.
type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
}
if err != nil {
return err
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
if h, ok := t.handler.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
return nil
}
// ----------------------------------------------------------------------------
// Context
// ----------------------------------------------------------------------------
// RouteMatch stores information about a matched route.
type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
// MatchErr is set to appropriate matching error
// It is set to ErrMethodMismatch if there is a mismatch in
// the request method and route method
MatchErr error
}
type contextKey int
const (
varsKey contextKey = iota
routeKey
)
// Vars returns the route variables for the current request, if any.
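// For example (a sketch), inside a handler registered for "/articles/{id}":
//
// id := Vars(r)["id"]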
func Vars(r *http.Request) map[string]string {
if rv := r.Context().Value(varsKey); rv != nil {
return rv.(map[string]string)
}
return nil
}
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns.
func CurrentRoute(r *http.Request) *Route {
if rv := r.Context().Value(routeKey); rv != nil {
return rv.(*Route)
}
return nil
}
func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
ctx := context.WithValue(r.Context(), varsKey, vars)
return r.WithContext(ctx)
}
func requestWithRoute(r *http.Request, route *Route) *http.Request {
ctx := context.WithValue(r.Context(), routeKey, route)
return r.WithContext(ctx)
}
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
// uniqueVars returns an error if two slices contain duplicated strings.
func uniqueVars(s1, s2 []string) error {
for _, v1 := range s1 {
for _, v2 := range s2 {
if v1 == v2 {
return fmt.Errorf("mux: duplicated route variable %q", v2)
}
}
}
return nil
}
// checkPairs returns the count of strings passed in, and an error if
// the count is not an even number.
func checkPairs(pairs ...string) (int, error) {
length := len(pairs)
if length%2 != 0 {
return length, fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
}
return length, nil
}
// mapFromPairsToString converts variadic string parameters to a
// string to string map.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]string, length/2)
for i := 0; i < length; i += 2 {
m[pairs[i]] = pairs[i+1]
}
return m, nil
}
// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]*regexp.Regexp, length/2)
for i := 0; i < length; i += 2 {
regex, err := regexp.Compile(pairs[i+1])
if err != nil {
return nil, err
}
m[pairs[i]] = regex
}
return m, nil
}
// matchInArray returns true if the given string value is in the array.
func matchInArray(arr []string, value string) bool {
for _, v := range arr {
if v == value {
return true
}
}
return false
}
// matchMapWithString returns true if the given key/value pairs exist in a given map.
func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != "" {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v == value {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
// the given regex
func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != nil {
// If the regexp is nil we only check that the key exists. Otherwise we
// also check that at least one of the values matches the regexp.
valueExists := false
for _, value := range values {
if v.MatchString(value) {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// methodNotAllowed replies to the request with an HTTP status code 405.
func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
}
// methodNotAllowedHandler returns a simple request handler
// that replies to each request with a status code 405.
func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }

388
vendor/github.com/gorilla/mux/regexp.go generated vendored Normal file
View File

@ -0,0 +1,388 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
)
type routeRegexpOptions struct {
strictSlash bool
useEncodedPath bool
}
type regexpType int
const (
regexpTypePath regexpType = iota
regexpTypeHost
regexpTypePrefix
regexpTypeQuery
)
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
return nil, errBraces
}
// Backup the original.
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
if typ == regexpTypeQuery {
defaultPattern = ".*"
} else if typ == regexpTypeHost {
defaultPattern = "[^.]+"
}
// strictSlash is only relevant when matching a path.
if typ != regexpTypePath {
options.strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
if options.strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
varsN := make([]string, len(idxs)/2)
varsR := make([]*regexp.Regexp, len(idxs)/2)
pattern := bytes.NewBufferString("")
pattern.WriteByte('^')
reverse := bytes.NewBufferString("")
var end int
var err error
for i := 0; i < len(idxs); i += 2 {
// Set all values we are interested in.
raw := tpl[end:idxs[i]]
end = idxs[i+1]
parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
name := parts[0]
patt := defaultPattern
if len(parts) == 2 {
patt = parts[1]
}
// Name or pattern can't be empty.
if name == "" || patt == "" {
return nil, fmt.Errorf("mux: missing name or pattern in %q",
tpl[idxs[i]:end])
}
// Build the regexp pattern.
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
// Build the reverse template.
fmt.Fprintf(reverse, "%s%%s", raw)
// Append variable name and compiled pattern.
varsN[i/2] = name
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
if err != nil {
return nil, err
}
}
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
if options.strictSlash {
pattern.WriteString("[/]?")
}
if typ == regexpTypeQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
var wildcardHostPort bool
if typ == regexpTypeHost {
if !strings.Contains(pattern.String(), ":") {
wildcardHostPort = true
}
}
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
}
// Compile full regexp.
reg, errCompile := regexp.Compile(pattern.String())
if errCompile != nil {
return nil, errCompile
}
// Check for capturing groups which used to work in older versions
if reg.NumSubexp() != len(idxs)/2 {
panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
}
// Done!
return &routeRegexp{
template: template,
regexpType: typ,
options: options,
regexp: reg,
reverse: reverse.String(),
varsN: varsN,
varsR: varsR,
wildcardHostPort: wildcardHostPort,
}, nil
}
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
// The unmodified template.
template string
// The type of match
regexpType regexpType
// Options for matching
options routeRegexpOptions
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
reverse string
// Variable names.
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
// Wildcard host-port (no strict port match in hostname)
wildcardHostPort bool
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
if r.regexpType == regexpTypeHost {
host := getHost(req)
if r.wildcardHostPort {
// Don't be strict on the port match
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
}
return r.regexp.MatchString(host)
}
if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
if r.options.useEncodedPath {
path = req.URL.EscapedPath()
}
return r.regexp.MatchString(path)
}
// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
urlValues := make([]interface{}, len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
if r.regexpType == regexpTypeQuery {
value = url.QueryEscape(value)
}
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
if !r.regexp.MatchString(rv) {
// The URL is checked against the full regexp, instead of checking
// individual variables. This is faster but to provide a good error
// message, we check individual regexps if the URL doesn't match.
for k, v := range r.varsN {
if !r.varsR[k].MatchString(values[v]) {
return "", fmt.Errorf(
"mux: variable %q doesn't match, expected %q", values[v],
r.varsR[k].String())
}
}
}
return rv, nil
}
// getURLQuery returns a single query parameter from a request URL.
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
if r.regexpType != regexpTypeQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
if ok {
return templateKey + "=" + val
}
return ""
}
// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
// If key was not found, empty string and false is returned.
func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
query := []byte(rawQuery)
for len(query) > 0 {
foundKey := query
if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
foundKey, query = foundKey[:i], foundKey[i+1:]
} else {
query = query[:0]
}
if len(foundKey) == 0 {
continue
}
var value []byte
if i := bytes.IndexByte(foundKey, '='); i >= 0 {
foundKey, value = foundKey[:i], foundKey[i+1:]
}
if len(foundKey) < len(key) {
// Cannot possibly be key.
continue
}
keyString, err := url.QueryUnescape(string(foundKey))
if err != nil {
continue
}
if keyString != key {
continue
}
valueString, err := url.QueryUnescape(string(value))
if err != nil {
continue
}
return valueString, true
}
return "", false
}
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
return r.regexp.MatchString(r.getURLQuery(req))
}
// braceIndices returns the first level curly brace indices from a string.
// It returns an error in case of unbalanced braces.
func braceIndices(s string) ([]int, error) {
var level, idx int
var idxs []int
for i := 0; i < len(s); i++ {
switch s[i] {
case '{':
if level++; level == 1 {
idx = i
}
case '}':
if level--; level == 0 {
idxs = append(idxs, idx, i+1)
} else if level < 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
}
}
if level != 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
return idxs, nil
}
// varGroupName builds a capturing group name for the indexed variable.
func varGroupName(idx int) string {
return "v" + strconv.Itoa(idx)
}
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------
// routeRegexpGroup groups the route matchers that carry variables.
type routeRegexpGroup struct {
host *routeRegexp
path *routeRegexp
queries []*routeRegexp
}
// setMatch extracts the variables from the URL once a route matches.
func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
if v.host.wildcardHostPort {
// Don't be strict on the port match
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
}
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
}
}
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Store path variables.
if v.path != nil {
matches := v.path.regexp.FindStringSubmatchIndex(path)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
if v.path.options.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
u, _ := url.Parse(req.URL.String())
if p1 {
u.Path = u.Path[:len(u.Path)-1]
} else {
u.Path += "/"
}
m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
}
}
}
}
// Store query string variables.
for _, q := range v.queries {
queryURL := q.getURLQuery(req)
matches := q.regexp.FindStringSubmatchIndex(queryURL)
if len(matches) > 0 {
extractVars(queryURL, matches, q.varsN, m.Vars)
}
}
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
func extractVars(input string, matches []int, names []string, output map[string]string) {
for i, name := range names {
output[name] = input[matches[2*i+2]:matches[2*i+3]]
}
}

765
vendor/github.com/gorilla/mux/route.go generated vendored Normal file
View File

@ -0,0 +1,765 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
)
// Route stores information to match a request and build URLs.
type Route struct {
// Request handler for the route.
handler http.Handler
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
name string
// Error resulted from building a route.
err error
// "global" reference to all named routes
namedRoutes map[string]*Route
// config possibly passed in from `Router`
routeConf
}
// SkipClean reports whether path cleaning is enabled for this route via
// Router.SkipClean.
func (r *Route) SkipClean() bool {
return r.skipClean
}
// Match matches the route against the request.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
var matchErr error
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
if _, ok := m.(methodMatcher); ok {
matchErr = ErrMethodMismatch
continue
}
// Ignore ErrNotFound errors. These errors arise from match call
// to Subrouters.
//
// This prevents subsequent matching subrouters from failing to
// run middleware. If not ignored, the middleware would see a
// non-nil MatchErr and be skipped, even when there was a
// matching route.
if match.MatchErr == ErrNotFound {
match.MatchErr = nil
}
matchErr = nil // nolint:ineffassign
return false
} else {
// Multiple routes may share the same path but use different HTTP methods. For instance:
// Route 1: POST "/users/{id}".
// Route 2: GET "/users/{id}", parameters: "id": "[0-9]+".
//
// The router must handle these cases correctly. For a GET request to "/users/abc",
// the router should return a "Not Found" error as no route fully matches this request.
if match.MatchErr == ErrMethodMismatch {
match.MatchErr = nil
}
}
}
if matchErr != nil {
match.MatchErr = matchErr
return false
}
if match.MatchErr == ErrMethodMismatch && r.handler != nil {
// We found a route which matches request method, clear MatchErr
match.MatchErr = nil
// Then override the mis-matched handler
match.Handler = r.handler
}
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
}
if match.Handler == nil {
match.Handler = r.handler
}
if match.Vars == nil {
match.Vars = make(map[string]string)
}
// Set variables.
r.regexp.setMatch(req, match, r)
return true
}
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------
// GetError returns an error resulted from building the route, if any.
func (r *Route) GetError() error {
return r.err
}
// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
r.buildOnly = true
return r
}
// Handler --------------------------------------------------------------------
// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
if r.err == nil {
r.handler = handler
}
return r
}
// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
return r.Handler(http.HandlerFunc(f))
}
// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
return r.handler
}
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
// It is an error to call Name more than once on a route.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
r.name, name)
}
if r.err == nil {
r.name = name
r.namedRoutes[name] = r
}
return r
}
// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
return r.name
}
// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------
// matcher types try to match a request.
type matcher interface {
Match(*http.Request, *RouteMatch) bool
}
// addMatcher adds a matcher to the route.
func (r *Route) addMatcher(m matcher) *Route {
if r.err == nil {
r.matchers = append(r.matchers, m)
}
return r
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
strictSlash: r.strictSlash,
useEncodedPath: r.useEncodedPath,
})
if err != nil {
return err
}
for _, q := range r.regexp.queries {
if err = uniqueVars(rr.varsN, q.varsN); err != nil {
return err
}
}
if typ == regexpTypeHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
}
}
r.regexp.host = rr
} else {
if r.regexp.host != nil {
if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
return err
}
}
if typ == regexpTypeQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
}
}
r.addMatcher(rr)
return nil
}
// Headers --------------------------------------------------------------------
// headerMatcher matches the request against header values.
type headerMatcher map[string]string
func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithString(m, r.Header, true)
}
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
// r := mux.NewRouter().NewRoute()
// r.Headers("Content-Type", "application/json",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
if r.err == nil {
var headers map[string]string
headers, r.err = mapFromPairsToString(pairs...)
return r.addMatcher(headerMatcher(headers))
}
return r
}
// headerRegexMatcher matches the request against the route given a regex for the header
type headerRegexMatcher map[string]*regexp.Regexp
func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithRegex(m, r.Header, true)
}
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
// support. For example:
//
// r := mux.NewRouter().NewRoute()
// r.HeadersRegexp("Content-Type", "application/(text|json)",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request headers match their regular expressions.
// If the value is an empty string, it will match any value if the key is set.
// Use the start and end of string anchors (^ and $) to match an exact value.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
headers, r.err = mapFromPairsToRegex(pairs...)
return r.addMatcher(headerRegexMatcher(headers))
}
return r
}
// Host -----------------------------------------------------------------------
// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter().NewRoute()
// r.Host("www.example.com")
// r.Host("{subdomain}.domain.com")
// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
return r
}
// MatcherFunc ----------------------------------------------------------------
// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool
// Match returns the match for a given request.
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
return m(r, match)
}
// MatcherFunc adds a custom function to be used as request matcher.
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
return r.addMatcher(f)
}
// Methods --------------------------------------------------------------------
// methodMatcher matches the request against HTTP methods.
type methodMatcher []string
func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.Method)
}
// Methods adds a matcher for HTTP methods.
// It accepts a sequence of one or more methods to be matched, e.g.:
// "GET", "POST", "PUT".
func (r *Route) Methods(methods ...string) *Route {
for k, v := range methods {
methods[k] = strings.ToUpper(v)
}
return r.addMatcher(methodMatcher(methods))
}
// Path -----------------------------------------------------------------------
// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter().NewRoute()
// r.Path("/products/").Handler(ProductsHandler)
// r.Path("/products/{key}").Handler(ProductsHandler)
// r.Path("/articles/{category}/{id:[0-9]+}").
// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePath)
return r
}
// PathPrefix -----------------------------------------------------------------
// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
return r
}
// Query ----------------------------------------------------------------------
// Queries adds a matcher for URL query values.
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
// r := mux.NewRouter().NewRoute()
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined queries
// values, e.g.: ?foo=bar&id=42.
//
// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
func (r *Route) Queries(pairs ...string) *Route {
length := len(pairs)
if length%2 != 0 {
r.err = fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
return nil
}
for i := 0; i < length; i += 2 {
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
return r
}
}
return r
}
// Schemes --------------------------------------------------------------------
// schemeMatcher matches the request against URL schemes.
type schemeMatcher []string
func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
scheme := r.URL.Scheme
// https://golang.org/pkg/net/http/#Request
// "For [most] server requests, fields other than Path and RawQuery will be
// empty."
// Since we're an http muxer, the scheme is either going to be http or https
// though, so we can just set it based on the tls termination state.
if scheme == "" {
if r.TLS == nil {
scheme = "http"
} else {
scheme = "https"
}
}
return matchInArray(m, scheme)
}
// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
// If the request's URL has a scheme set, it will be matched against.
// Generally, the URL scheme will only be set if a previous handler set it,
// such as the ProxyHeaders handler from gorilla/handlers.
// If unset, the scheme will be determined based on the request's TLS
// termination state.
// The first argument to Schemes will be used when constructing a route URL.
func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
if len(schemes) > 0 {
r.buildScheme = schemes[0]
}
return r.addMatcher(schemeMatcher(schemes))
}
// BuildVarsFunc --------------------------------------------------------------
// BuildVarsFunc is the function signature used by custom build variable
// functions (which can modify route variables before a route's URL is built).
type BuildVarsFunc func(map[string]string) map[string]string
// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
if r.buildVarsFunc != nil {
// compose the old and new functions
old := r.buildVarsFunc
r.buildVarsFunc = func(m map[string]string) map[string]string {
return f(old(m))
}
} else {
r.buildVarsFunc = f
}
return r
}
// Subrouter ------------------------------------------------------------------
// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
// r := mux.NewRouter().NewRoute()
// s := r.Host("www.example.com").Subrouter()
// s.HandleFunc("/products/", ProductsHandler)
// s.HandleFunc("/products/{key}", ProductHandler)
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
// initialize a subrouter with a copy of the parent route's configuration
router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.addMatcher(router)
return router
}
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------
// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// ...a URL for it can be built using:
//
// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return an url.URL with the following path:
//
// "/articles/technology/42"
//
// This also works for host variables:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Host("{subdomain}.domain.com").
// Name("article")
//
// // url.String() will be "http://news.domain.com/articles/technology/42"
// url, err := r.Get("article").URL("subdomain", "news",
// "category", "technology",
// "id", "42")
//
// The scheme of the resulting url will be the first argument that was passed to Schemes:
//
// // url.String() will be "https://example.com"
// r := mux.NewRouter().NewRoute()
// url, err := r.Host("example.com").
// Schemes("https", "http").URL()
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
var scheme, host, path string
queries := make([]string, 0, len(r.regexp.queries))
if r.regexp.host != nil {
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
scheme = "http"
if r.buildScheme != "" {
scheme = r.buildScheme
}
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(values); err != nil {
return nil, err
}
}
for _, q := range r.regexp.queries {
var query string
if query, err = q.url(values); err != nil {
return nil, err
}
queries = append(queries, query)
}
return &url.URL{
Scheme: scheme,
Host: host,
Path: path,
RawQuery: strings.Join(queries, "&"),
}, nil
}
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
host, err := r.regexp.host.url(values)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "http",
Host: host,
}
if r.buildScheme != "" {
u.Scheme = r.buildScheme
}
return u, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
path, err := r.regexp.path.url(values)
if err != nil {
return nil, err
}
return &url.URL{
Path: path,
}, nil
}
// GetPathTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route doesn't have a path")
}
return r.regexp.path.template, nil
}
// GetPathRegexp returns the expanded regular expression used to match route path.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathRegexp() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route does not have a path")
}
return r.regexp.path.regexp.String(), nil
}
// GetQueriesRegexp returns the expanded regular expressions used to match the
// route queries.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not have queries.
func (r *Route) GetQueriesRegexp() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
queries := make([]string, 0, len(r.regexp.queries))
for _, query := range r.regexp.queries {
queries = append(queries, query.regexp.String())
}
return queries, nil
}
// GetQueriesTemplates returns the templates used to build the
// query matching.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define queries.
func (r *Route) GetQueriesTemplates() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
queries := make([]string, 0, len(r.regexp.queries))
for _, query := range r.regexp.queries {
queries = append(queries, query.template)
}
return queries, nil
}
// GetMethods returns the methods the route matches against
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if route does not have methods.
func (r *Route) GetMethods() ([]string, error) {
if r.err != nil {
return nil, r.err
}
for _, m := range r.matchers {
if methods, ok := m.(methodMatcher); ok {
return []string(methods), nil
}
}
return nil, errors.New("mux: route doesn't have methods")
}
// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a host.
func (r *Route) GetHostTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.host == nil {
return "", errors.New("mux: route doesn't have a host")
}
return r.regexp.host.template, nil
}
// GetVarNames returns the names of all variables added by regexp matchers.
// These can be used to know which route variables should be passed into r.URL().
func (r *Route) GetVarNames() ([]string, error) {
if r.err != nil {
return nil, r.err
}
var varNames []string
if r.regexp.host != nil {
varNames = append(varNames, r.regexp.host.varsN...)
}
if r.regexp.path != nil {
varNames = append(varNames, r.regexp.path.varsN...)
}
for _, regx := range r.regexp.queries {
varNames = append(varNames, regx.varsN...)
}
return varNames, nil
}
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
m, err := mapFromPairsToString(pairs...)
if err != nil {
return nil, err
}
return r.buildVars(m), nil
}
func (r *Route) buildVars(m map[string]string) map[string]string {
if r.buildVarsFunc != nil {
m = r.buildVarsFunc(m)
}
return m
}

19
vendor/github.com/gorilla/mux/test_helpers.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import "net/http"
// SetURLVars sets the URL variables for the given request, to be accessed via
// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
// copy is returned.
//
// This API should only be used for testing purposes; it provides a way to
// inject variables into the request context. Alternatively, URL variables
// can be set by making a route that captures the required variables,
// starting a server and sending the request to that server.
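// For example, a sketch of a handler test (ArticleHandler is a placeholder and
// httptest is net/http/httptest):
//
// req := httptest.NewRequest("GET", "/articles/42", nil)
// req = SetURLVars(req, map[string]string{"id": "42"})
// ArticleHandler(httptest.NewRecorder(), req)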
func SetURLVars(r *http.Request, val map[string]string) *http.Request {
return requestWithVars(r, val)
}

6
vendor/github.com/lib/pq/.gitignore generated vendored Normal file
View File

@ -0,0 +1,6 @@
.db
*.test
*~
*.swp
.idea
.vscode

8
vendor/github.com/lib/pq/LICENSE.md generated vendored Normal file
View File

@ -0,0 +1,8 @@
Copyright (c) 2011-2013, 'pq' Contributors
Portions Copyright (C) 2011 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

36
vendor/github.com/lib/pq/README.md generated vendored Normal file
View File

@ -0,0 +1,36 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc)
## Install
go get github.com/lib/pq
## Features
* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
* GSS (Kerberos) auth
## Tests
`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Status
This package is currently in maintenance mode, which means:
1. It generally does not accept new features.
2. It does accept bug fixes and version compatibility changes provided by the community.
3. Maintainers usually do not resolve reported issues.
4. Community members are encouraged to help each other with reported issues.
For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development.
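
As a small illustration of the ParseURL feature listed above, a hedged sketch (credentials and database name are placeholders) that converts a URL into a key/value DSN before opening the connection:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Placeholder credentials; sql.Open also accepts the URL form directly,
	// ParseURL is only needed when a key/value DSN is required.
	url := "postgres://user:password@localhost:5432/appdb?sslmode=disable"

	dsn, err := pq.ParseURL(url)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dsn) // e.g. "dbname=appdb host=localhost password=password ..."

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```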

33
vendor/github.com/lib/pq/TESTS.md generated vendored Normal file
View File

@ -0,0 +1,33 @@
# Tests
## Running Tests
`go test` is used for testing. A running PostgreSQL
server is required, with the ability to log in. The
tests connect to the database "pqgotest" on
"localhost" by default, but both can be overridden using [environment
variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
Example:
PGHOST=/run/postgresql go test
## Benchmarks
A benchmark suite can be run as part of the tests:
go test -bench .
## Example setup (Docker)
Run a postgres container:
```
docker run -p 5432:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres
```
Run tests:
```
PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
```

895
vendor/github.com/lib/pq/array.go generated vendored Normal file
View File

@ -0,0 +1,895 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []float32:
return (*Float32Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []int32:
return (*Int32Array)(&a)
case []string:
return (*StringArray)(&a)
case [][]byte:
return (*ByteaArray)(&a)
case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]float32:
return (*Float32Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]int32:
return (*Int32Array)(a)
case *[]string:
return (*StringArray)(a)
case *[][]byte:
return (*ByteaArray)(a)
}
return GenericArray{a}
}
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}
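
A hedged sketch of a custom element type that overrides the delimiter; boxValue is a made-up name, and box is the built-in PostgreSQL type whose arrays use ';' rather than ',':

```go
package pqexample

import (
	"database/sql/driver"
	"fmt"
)

// boxValue holds the text form of a PostgreSQL box. Box arrays are delimited
// by ';' instead of ',', so the element type overrides ArrayDelimiter.
type boxValue string

func (boxValue) ArrayDelimiter() string { return ";" }

// Value lets GenericArray serialize the element.
func (b boxValue) Value() (driver.Value, error) { return string(b), nil }

// Scan lets GenericArray populate the element when reading.
func (b *boxValue) Scan(src interface{}) error {
	switch v := src.(type) {
	case []byte:
		*b = boxValue(v)
		return nil
	case string:
		*b = boxValue(v)
		return nil
	}
	return fmt.Errorf("cannot scan %T into boxValue", src)
}
```

Because []boxValue is not one of the concrete cases handled by Array, pq.Array falls back to GenericArray, which picks the ';' delimiter up from the element type via ArrayDelimiter.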
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool
// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}
func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)
for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}
b[0] = '{'
b[2*n] = '}'
return string(b), nil
}
return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte
// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}
func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}
b := make([]byte, size)
for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}
b[0] = '{'
b[size-1] = '}'
return string(b), nil
}
return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64
// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}
func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// Float32Array represents a one-dimensional array of the PostgreSQL real
// (single precision) type.
type Float32Array []float32
// Scan implements the sql.Scanner interface.
func (a *Float32Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float32Array", src)
}
func (a *Float32Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float32Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float32Array, len(elems))
for i, v := range elems {
var x float64
if x, err = strconv.ParseFloat(string(v), 32); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
b[i] = float32(x)
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float32Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}
dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}
switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}
// TODO allow multidimensional
if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}
// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
// TODO handle multidimensional
switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}
return nil
}
// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}
rv := reflect.ValueOf(a.A)
switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)
b, _, err := appendArray(b, rv, n)
return string(b), err
}
return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}
func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// Int32Array represents a one-dimensional array of the PostgreSQL integer types.
type Int32Array []int32
// Scan implements the sql.Scanner interface.
func (a *Int32Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int32Array", src)
}
func (a *Int32Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int32Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int32Array, len(elems))
for i, v := range elems {
x, err := strconv.ParseInt(string(v), 10, 32)
if err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
b[i] = int32(x)
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int32Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, int64(a[0]), 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, int64(a[i]), 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'
b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}
return string(append(b, '}')), nil
}
return "{}", nil
}
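
The named array types above implement both sql.Scanner and driver.Valuer, so they can be scanned and bound directly, without the pq.Array wrapper. A minimal sketch, assuming a reachable database and a hypothetical tasks.tags text[] column:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // registers the "postgres" driver and provides pq.StringArray
)

func main() {
	db, err := sql.Open("postgres", "dbname=appdb sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// StringArray satisfies sql.Scanner itself, so no pq.Array wrapper is needed.
	var tags pq.StringArray
	if err := db.QueryRow(`SELECT ARRAY['urgent','backend']::text[]`).Scan(&tags); err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags) // [urgent backend]

	// The same type works as a bind parameter; "tasks.tags" is hypothetical.
	if _, err := db.Exec(`UPDATE tasks SET tags = $1 WHERE id = $2`, pq.StringArray{"urgent"}, 1); err != nil {
		log.Fatal(err)
	}
}
```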
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error
b = append(b, '{')
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}
for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}
return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}
return b, "", nil
}
}
var del = ","
var err error
var iv interface{} = rv.Interface()
if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}
switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}
b, err = appendValue(b, iv)
return b, del, err
}
func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}
func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int
if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}
Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)
Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}
for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}

91
vendor/github.com/lib/pq/buf.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package pq
import (
"bytes"
"encoding/binary"
"github.com/lib/pq/oid"
)
type readBuf []byte
func (b *readBuf) int32() (n int) {
n = int(int32(binary.BigEndian.Uint32(*b)))
*b = (*b)[4:]
return
}
func (b *readBuf) oid() (n oid.Oid) {
n = oid.Oid(binary.BigEndian.Uint32(*b))
*b = (*b)[4:]
return
}
// N.B.: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
n = int(binary.BigEndian.Uint16(*b))
*b = (*b)[2:]
return
}
func (b *readBuf) string() string {
i := bytes.IndexByte(*b, 0)
if i < 0 {
errorf("invalid message format; expected string terminator")
}
s := (*b)[:i]
*b = (*b)[i+1:]
return string(s)
}
func (b *readBuf) next(n int) (v []byte) {
v = (*b)[:n]
*b = (*b)[n:]
return
}
func (b *readBuf) byte() byte {
return b.next(1)[0]
}
type writeBuf struct {
buf []byte
pos int
}
func (b *writeBuf) int32(n int) {
x := make([]byte, 4)
binary.BigEndian.PutUint32(x, uint32(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) int16(n int) {
x := make([]byte, 2)
binary.BigEndian.PutUint16(x, uint16(n))
b.buf = append(b.buf, x...)
}
func (b *writeBuf) string(s string) {
b.buf = append(append(b.buf, s...), '\000')
}
func (b *writeBuf) byte(c byte) {
b.buf = append(b.buf, c)
}
func (b *writeBuf) bytes(v []byte) {
b.buf = append(b.buf, v...)
}
func (b *writeBuf) wrap() []byte {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
return b.buf
}
func (b *writeBuf) next(c byte) {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
b.pos = len(b.buf) + 1
b.buf = append(b.buf, c, 0, 0, 0, 0)
}
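
writeBuf.next reserves four zero bytes after the message-type byte, and wrap/next later backfill them with the big-endian length, which counts itself but not the type byte. A standalone sketch of that framing idea, not part of the package API:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame shows the idea behind writeBuf: a type byte, four reserved length
// bytes, the payload, and then the length (payload + 4) written back over
// the reserved bytes.
func frame(msgType byte, payload []byte) []byte {
	buf := []byte{msgType, 0, 0, 0, 0}
	buf = append(buf, payload...)
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(buf)-1))
	return buf
}

func main() {
	// 'Q' is the simple-query message type; its payload is a NUL-terminated string.
	msg := frame('Q', append([]byte("SELECT 1"), 0))
	fmt.Printf("% x\n", msg)
}
```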

2112
vendor/github.com/lib/pq/conn.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

8
vendor/github.com/lib/pq/conn_go115.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
//go:build go1.15
// +build go1.15
package pq
import "database/sql/driver"
var _ driver.Validator = &conn{}

247
vendor/github.com/lib/pq/conn_go18.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
package pq
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
"time"
)
const (
watchCancelDialContextTimeout = time.Second * 10
)
// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
if finish != nil {
finish()
}
return nil, err
}
r.finish = finish
return r, nil
}
// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
return cn.Exec(query, list)
}
// Implement the "ConnPrepareContext" interface
func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
return cn.Prepare(query)
}
// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
var mode string
switch sql.IsolationLevel(opts.Isolation) {
case sql.LevelDefault:
// Don't touch mode: use the server's default
case sql.LevelReadUncommitted:
mode = " ISOLATION LEVEL READ UNCOMMITTED"
case sql.LevelReadCommitted:
mode = " ISOLATION LEVEL READ COMMITTED"
case sql.LevelRepeatableRead:
mode = " ISOLATION LEVEL REPEATABLE READ"
case sql.LevelSerializable:
mode = " ISOLATION LEVEL SERIALIZABLE"
default:
return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
}
if opts.ReadOnly {
mode += " READ ONLY"
} else {
mode += " READ WRITE"
}
tx, err := cn.begin(mode)
if err != nil {
return nil, err
}
cn.txnFinish = cn.watchCancel(ctx)
return tx, nil
}
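
From the caller's side, the isolation mapping above is reached through database/sql's TxOptions. A minimal sketch, with a placeholder DSN and a hypothetical tasks table:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=appdb sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// database/sql forwards these options to the driver's BeginTx, which
	// emits "ISOLATION LEVEL SERIALIZABLE ... READ ONLY" as shown above.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelSerializable,
		ReadOnly:  true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	var n int
	if err := tx.QueryRow(`SELECT count(*) FROM tasks`).Scan(&n); err != nil { // hypothetical table
		log.Fatal(err)
	}
	log.Println("tasks:", n)
}
```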
func (cn *conn) Ping(ctx context.Context) error {
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
rows, err := cn.simpleQuery(";")
if err != nil {
return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
}
rows.Close()
return nil
}
func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{}, 1)
go func() {
select {
case <-done:
select {
case finished <- struct{}{}:
default:
// We raced with the finish func, let the next query handle this with the
// context.
return
}
// Set the connection state to bad so it does not get reused.
cn.err.set(ctx.Err())
// At this point the function level context is canceled,
// so it must not be used for the additional network
// request to cancel the query.
// Create a new context to pass into the dial.
ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout)
defer cancel()
_ = cn.cancel(ctxCancel)
case <-finished:
}
}()
return func() {
select {
case <-finished:
cn.err.set(ctx.Err())
cn.Close()
case finished <- struct{}{}:
}
}
}
return nil
}
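
The effect of watchCancel, seen from the caller's side: when the context expires, the driver dials a second connection to send a cancel request and the blocked query returns an error. A hedged sketch with a placeholder DSN:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=appdb sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// pg_sleep outlives the deadline, so the driver cancels the statement
	// server-side and QueryContext returns an error instead of blocking.
	rows, err := db.QueryContext(ctx, `SELECT pg_sleep(10)`)
	if err != nil {
		log.Println("query aborted:", err) // e.g. "pq: canceling statement due to user request"
		return
	}
	defer rows.Close()
}
```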
func (cn *conn) cancel(ctx context.Context) error {
// Create a new values map (copy). This makes sure the connection created
// in this method cannot write to the same underlying data, which could
// cause a concurrent map write panic. This is necessary because cancel
// is called from a goroutine in watchCancel.
o := make(values)
for k, v := range cn.opts {
o[k] = v
}
c, err := dial(ctx, cn.dialer, o)
if err != nil {
return err
}
defer c.Close()
{
can := conn{
c: c,
}
err = can.ssl(o)
if err != nil {
return err
}
w := can.writeBuf(0)
w.int32(80877102) // cancel request code
w.int32(cn.processID)
w.int32(cn.secretKey)
if err := can.sendStartupPacket(w); err != nil {
return err
}
}
// Read until EOF to ensure that the server received the cancel.
{
_, err := io.Copy(ioutil.Discard, c)
return err
}
}
// Implement the "StmtQueryContext" interface
func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := st.watchCancel(ctx)
r, err := st.query(list)
if err != nil {
if finish != nil {
finish()
}
return nil, err
}
r.finish = finish
return r, nil
}
// Implement the "StmtExecContext" interface
func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
if finish := st.watchCancel(ctx); finish != nil {
defer finish()
}
return st.Exec(list)
}
// watchCancel is implemented on stmt in order to not mark the parent conn as bad
func (st *stmt) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
// At this point the function level context is canceled,
// so it must not be used for the additional network
// request to cancel the query.
// Create a new context to pass into the dial.
ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout)
defer cancel()
_ = st.cancel(ctxCancel)
finished <- struct{}{}
case <-finished:
}
}()
return func() {
select {
case <-finished:
case finished <- struct{}{}:
}
}
}
return nil
}
func (st *stmt) cancel(ctx context.Context) error {
return st.cn.cancel(ctx)
}

120
vendor/github.com/lib/pq/connector.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
package pq
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"os"
"strings"
)
// Connector represents a fixed configuration for the pq driver with a given
// name. Connector satisfies the database/sql/driver Connector interface and
// can be used to create any number of DB Conn's via the database/sql OpenDB
// function.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
type Connector struct {
opts values
dialer Dialer
}
// Connect returns a connection to the database using the fixed configuration
// of this Connector. Context is not used.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
return c.open(ctx)
}
// Dialer allows changing the dialer used to open connections.
func (c *Connector) Dialer(dialer Dialer) {
c.dialer = dialer
}
// Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver {
return &Driver{}
}
// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given dsn. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// database/sql.OpenDB.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
func NewConnector(dsn string) (*Connector, error) {
var err error
o := make(values)
// A number of defaults are applied here, in this order:
//
// * Very low precedence defaults applied in every situation
// * Environment variables
// * Explicitly passed connection information
o["host"] = "localhost"
o["port"] = "5432"
// N.B.: Extra float digits should be set to 3, but that breaks
// Postgres 8.4 and older, where the max is 2.
o["extra_float_digits"] = "2"
for k, v := range parseEnviron(os.Environ()) {
o[k] = v
}
if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
dsn, err = ParseURL(dsn)
if err != nil {
return nil, err
}
}
if err := parseOpts(dsn, o); err != nil {
return nil, err
}
// Use the "fallback" application name if necessary
if fallback, ok := o["fallback_application_name"]; ok {
if _, ok := o["application_name"]; !ok {
o["application_name"] = fallback
}
}
// We can't work with any client_encoding other than UTF-8 currently.
// However, we have historically allowed the user to set it to UTF-8
// explicitly, and there's no reason to break such programs, so allow that.
// Note that the "options" setting could also set client_encoding, but
// parsing its value is not worth it. Instead, we always explicitly send
// client_encoding as a separate run-time parameter, which should override
// anything set in options.
if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
return nil, errors.New("client_encoding must be absent or 'UTF8'")
}
o["client_encoding"] = "UTF8"
// DateStyle needs a similar treatment.
if datestyle, ok := o["datestyle"]; ok {
if datestyle != "ISO, MDY" {
return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
}
} else {
o["datestyle"] = "ISO, MDY"
}
// If a user is not provided by any other means, the last
// resort is to use the current operating system provided user
// name.
if _, ok := o["user"]; !ok {
u, err := userCurrent()
if err != nil {
return nil, err
}
o["user"] = u
}
// SSL is not necessary or supported over UNIX domain sockets
if network, _ := network(o); network == "unix" {
o["sslmode"] = "disable"
}
return &Connector{opts: o, dialer: defaultDialer{}}, nil
}
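
A minimal sketch of the Connector path (placeholder DSN): the DSN and environment are parsed once when the connector is created, and sql.OpenDB reuses that fixed configuration for every connection:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Placeholder DSN; NewConnector applies the defaults and env-var handling
	// described above once, up front.
	connector, err := pq.NewConnector("host=localhost port=5432 dbname=appdb sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```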

348
vendor/github.com/lib/pq/copy.go generated vendored Normal file
View File

@ -0,0 +1,348 @@
package pq
import (
"bytes"
"context"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"sync"
)
var (
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
errCopyInProgress = errors.New("pq: COPY in progress")
)
// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
buffer := bytes.NewBufferString("COPY ")
BufferQuoteIdentifier(table, buffer)
buffer.WriteString(" (")
makeStmt(buffer, columns...)
return buffer.String()
}
// makeStmt builds the statement string for CopyIn and CopyInSchema.
func makeStmt(buffer *bytes.Buffer, columns ...string) {
for i, col := range columns {
if i != 0 {
buffer.WriteString(", ")
}
BufferQuoteIdentifier(col, buffer)
}
buffer.WriteString(") FROM STDIN")
}
// CopyInSchema creates a COPY FROM statement which can be prepared with
// Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
buffer := bytes.NewBufferString("COPY ")
BufferQuoteIdentifier(schema, buffer)
buffer.WriteRune('.')
BufferQuoteIdentifier(table, buffer)
buffer.WriteString(" (")
makeStmt(buffer, columns...)
return buffer.String()
}
type copyin struct {
cn *conn
buffer []byte
rowData chan []byte
done chan bool
closed bool
mu struct {
sync.Mutex
err error
driver.Result
}
}
const ciBufferSize = 64 * 1024
// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024
func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
if !cn.isInTransaction() {
return nil, errCopyNotSupportedOutsideTxn
}
ci := &copyin{
cn: cn,
buffer: make([]byte, 0, ciBufferSize),
rowData: make(chan []byte),
done: make(chan bool, 1),
}
// add CopyData identifier + 4 bytes for message length
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
b := cn.writeBuf('Q')
b.string(q)
cn.send(b)
awaitCopyInResponse:
for {
t, r := cn.recv1()
switch t {
case 'G':
if r.byte() != 0 {
err = errBinaryCopyNotSupported
break awaitCopyInResponse
}
go ci.resploop()
return ci, nil
case 'H':
err = errCopyToNotSupported
break awaitCopyInResponse
case 'E':
err = parseError(r)
case 'Z':
if err == nil {
ci.setBad(driver.ErrBadConn)
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad(driver.ErrBadConn)
errorf("unknown response for copy query: %q", t)
}
}
// something went wrong, abort COPY before we return
b = cn.writeBuf('f')
b.string(err.Error())
cn.send(b)
for {
t, r := cn.recv1()
switch t {
case 'c', 'C', 'E':
case 'Z':
// correctly aborted, we're done
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad(driver.ErrBadConn)
errorf("unknown response for CopyFail: %q", t)
}
}
}
func (ci *copyin) flush(buf []byte) {
// set message length (without message identifier)
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
_, err := ci.cn.c.Write(buf)
if err != nil {
panic(err)
}
}
func (ci *copyin) resploop() {
for {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
ci.setBad(driver.ErrBadConn)
ci.setError(err)
ci.done <- true
return
}
switch t {
case 'C':
// complete
res, _ := ci.cn.parseComplete(r.string())
ci.setResult(res)
case 'N':
if n := ci.cn.noticeHandler; n != nil {
n(parseError(&r))
}
case 'Z':
ci.cn.processReadyForQuery(&r)
ci.done <- true
return
case 'E':
err := parseError(&r)
ci.setError(err)
default:
ci.setBad(driver.ErrBadConn)
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
}
}
}
func (ci *copyin) setBad(err error) {
ci.cn.err.set(err)
}
func (ci *copyin) getBad() error {
return ci.cn.err.get()
}
func (ci *copyin) err() error {
ci.mu.Lock()
err := ci.mu.err
ci.mu.Unlock()
return err
}
// setError() sets ci.err if one has not been set already. Caller must not be
// holding ci.Mutex.
func (ci *copyin) setError(err error) {
ci.mu.Lock()
if ci.mu.err == nil {
ci.mu.err = err
}
ci.mu.Unlock()
}
func (ci *copyin) setResult(result driver.Result) {
ci.mu.Lock()
ci.mu.Result = result
ci.mu.Unlock()
}
func (ci *copyin) getResult() driver.Result {
ci.mu.Lock()
result := ci.mu.Result
ci.mu.Unlock()
if result == nil {
return driver.RowsAffected(0)
}
return result
}
func (ci *copyin) NumInput() int {
return -1
}
func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
return nil, ErrNotSupported
}
// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
if ci.closed {
return nil, errCopyInClosed
}
if err := ci.getBad(); err != nil {
return nil, err
}
defer ci.cn.errRecover(&err)
if err := ci.err(); err != nil {
return nil, err
}
if len(v) == 0 {
if err := ci.Close(); err != nil {
return driver.RowsAffected(0), err
}
return ci.getResult(), nil
}
numValues := len(v)
for i, value := range v {
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
if i < numValues-1 {
ci.buffer = append(ci.buffer, '\t')
}
}
ci.buffer = append(ci.buffer, '\n')
if len(ci.buffer) > ciBufferFlushSize {
ci.flush(ci.buffer)
// reset buffer, keep bytes for message identifier and length
ci.buffer = ci.buffer[:5]
}
return driver.RowsAffected(0), nil
}
// CopyData inserts a raw string into the COPY stream. The insert is
// asynchronous and CopyData can return errors from previous CopyData calls to
// the same COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) CopyData(ctx context.Context, line string) (r driver.Result, err error) {
if ci.closed {
return nil, errCopyInClosed
}
if finish := ci.cn.watchCancel(ctx); finish != nil {
defer finish()
}
if err := ci.getBad(); err != nil {
return nil, err
}
defer ci.cn.errRecover(&err)
if err := ci.err(); err != nil {
return nil, err
}
ci.buffer = append(ci.buffer, []byte(line)...)
ci.buffer = append(ci.buffer, '\n')
if len(ci.buffer) > ciBufferFlushSize {
ci.flush(ci.buffer)
// reset buffer, keep bytes for message identifier and length
ci.buffer = ci.buffer[:5]
}
return driver.RowsAffected(0), nil
}
func (ci *copyin) Close() (err error) {
if ci.closed { // Don't do anything, we're already closed
return nil
}
ci.closed = true
if err := ci.getBad(); err != nil {
return err
}
defer ci.cn.errRecover(&err)
if len(ci.buffer) > 0 {
ci.flush(ci.buffer)
}
// Avoid touching the scratch buffer as resploop could be using it.
err = ci.cn.sendSimpleMessage('c')
if err != nil {
return err
}
<-ci.done
ci.cn.inCopy = false
if err := ci.err(); err != nil {
return err
}
return nil
}

268
vendor/github.com/lib/pq/doc.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
/*
Package pq is a pure Go Postgres driver for the database/sql package.
In most cases clients will use the database/sql package instead of
using this package directly. For example:
import (
"database/sql"
_ "github.com/lib/pq"
)
func main() {
connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
if err != nil {
log.Fatal(err)
}
age := 21
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
}
You can also connect to a database using a URL. For example:
connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
Connection String Parameters
Similarly to libpq, when establishing a connection using pq you are expected to
supply a connection string containing zero or more parameters.
A subset of the connection parameters supported by libpq are also supported by pq.
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
directly in the connection string. This is different from libpq, which does not allow
run-time parameters in the connection string, instead requiring you to supply
them in the options parameter.
For compatibility with libpq, the following special connection parameters are
supported:
* dbname - The name of the database to connect to
* user - The user to sign in as
* password - The user's password
* host - The host to connect to. Values that start with / are for unix
domain sockets. (default is localhost)
* port - The port to bind to. (default is 5432)
* sslmode - Whether or not to use SSL (default is require, this is not
the default for libpq)
* fallback_application_name - An application_name to fall back to if one isn't provided.
* connect_timeout - Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert - Cert file location. The file must contain PEM encoded data.
* sslkey - Key file location. The file must contain PEM encoded data.
* sslrootcert - The location of the root certificate file. The file
must contain PEM encoded data.
Valid values for sslmode are:
* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certificate presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)
See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
for more information about connection string parameters.
Use single quotes for values that contain whitespace:
"user=pqgotest password='with spaces'"
A backslash will escape the next character in values:
"user=space\ man password='it\'s valid'"
Note that the connection parameter client_encoding (which sets the
text encoding for the connection) may be set but must be "UTF8",
matching with the same rules as Postgres. It is an error to provide
any other value.
In addition to the parameters listed above, any run-time parameter that can be
set at backend start time can be set in the connection string. For more
information, see
http://www.postgresql.org/docs/current/static/runtime-config.html.
Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
supported by libpq are also supported by pq. If any of the environment
variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.
Queries
database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:
rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
pq does not support the LastInsertId() method of the Result type in database/sql.
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
RETURNING clause with a standard Query or QueryRow call:
var userid int
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
For more details on RETURNING, see the Postgres documentation:
http://www.postgresql.org/docs/current/static/sql-insert.html
http://www.postgresql.org/docs/current/static/sql-update.html
http://www.postgresql.org/docs/current/static/sql-delete.html
For additional instructions on querying see the documentation for the database/sql package.
Data Types
Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.
This package returns the following types for values from the PostgreSQL backend:
- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are
returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte
All other types are returned directly from the backend as []byte values in text format.
Errors
pq may return errors of type *pq.Error which can be interrogated for error details:
if err, ok := err.(*pq.Error); ok {
fmt.Println("pq error:", err.Code.Name())
}
See the pq.Error type for details.
Bulk imports
You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
handle can then be repeatedly "executed" to copy data into the target table.
After all data has been processed you should call Exec() once with no arguments
to flush all buffered data. Any call to Exec() might return an error which
should be handled appropriately, but because of the internal buffering an error
returned by Exec() might not be related to the data passed in the call that
failed.
CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
explicit transaction in pq.
Usage example:
txn, err := db.Begin()
if err != nil {
log.Fatal(err)
}
stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
if err != nil {
log.Fatal(err)
}
for _, user := range users {
_, err = stmt.Exec(user.Name, int64(user.Age))
if err != nil {
log.Fatal(err)
}
}
_, err = stmt.Exec()
if err != nil {
log.Fatal(err)
}
err = stmt.Close()
if err != nil {
log.Fatal(err)
}
err = txn.Commit()
if err != nil {
log.Fatal(err)
}
Notifications
PostgreSQL supports a simple publish/subscribe model over database
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
for more information about the general mechanism.
To start listening for notifications, you first have to open a new connection
to the database by calling NewListener. This connection can not be used for
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
channel"; once a notification channel is open, a notification generated on that
channel will effect a send on the Listener.Notify channel. A notification
channel will remain open until Unlisten is called, though connection loss might
result in some notifications being lost. To solve this problem, Listener sends
a nil pointer over the Notify channel any time the connection is re-established
following a connection loss. The application can get information about the
state of the underlying connection by setting an event callback in the call to
NewListener.
A single Listener can safely be used from concurrent goroutines, which means
that there is often no need to create more than one Listener in your
application. However, a Listener is always connected to a single database, so
you will need to create a new Listener instance for every database you want to
receive notifications in.
The channel name in both Listen and Unlisten is case sensitive, and can contain
any characters legal in an identifier (see
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
https://godoc.org/github.com/lib/pq/example/listen.
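
A hedged sketch of the Listener flow described above; the DSN and channel name are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	connStr := "dbname=appdb sslmode=disable" // placeholder DSN

	listener := pq.NewListener(connStr, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Println("listener event error:", err)
			}
		})
	if err := listener.Listen("task_events"); err != nil { // hypothetical channel
		log.Fatal(err)
	}

	for n := range listener.Notify {
		if n == nil {
			// A nil notification is sent after a reconnect; re-check state if needed.
			continue
		}
		log.Printf("notification on %q: %s", n.Channel, n.Extra)
	}
}
```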
Kerberos Support
If you need support for Kerberos authentication, add the following to your main
package:
import "github.com/lib/pq/auth/kerberos"
func init() {
pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() })
}
This package is in a separate module so that users who don't need Kerberos
don't have to download unnecessary dependencies.
When imported, additional connection string parameters are supported:
* krbsrvname - GSS (Kerberos) service name when constructing the
SPN (default is `postgres`). This will be combined with the host
to form the full SPN: `krbsrvname/host`.
* krbspn - GSS (Kerberos) SPN. This takes priority over
`krbsrvname` if present.
*/
package pq

632
vendor/github.com/lib/pq/encode.go generated vendored Normal file
View File

@ -0,0 +1,632 @@
package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/lib/pq/oid"
)
var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}
return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}
return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
case oid.T_uuid:
b, err := decodeUUIDBinary(s)
if err != nil {
panic(err)
}
return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
return parseTs(nil, string(s))
case oid.T_time:
return mustParse("15:04:05", typ, s)
case oid.T_timetz:
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
return s[0] == 't'
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
}
return i
case oid.T_float4, oid.T_float8:
// We always use 64 bit parsing, regardless of whether the input text is for
// a float4 or float8, because clients expect float64s for all float datatypes
// and returning a 32-bit parsed float64 produces lossy results.
f, err := strconv.ParseFloat(string(s), 64)
if err != nil {
errorf("%s", err)
}
return f
}
return s
}
// appendEncodedText encodes item in text format as required by COPY
// and appends to buf
func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(buf, v, 10)
case float64:
return strconv.AppendFloat(buf, v, 'f', -1, 64)
case []byte:
encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
return appendEscapedText(buf, string(encodedBytea))
case string:
return appendEscapedText(buf, v)
case bool:
return strconv.AppendBool(buf, v)
case time.Time:
return append(buf, formatTs(v)...)
case nil:
return append(buf, "\\N"...)
default:
errorf("encode: unknown type for %T", v)
}
panic("not reached")
}
func appendEscapedText(buf []byte, text string) []byte {
escapeNeeded := false
startPos := 0
var c byte
// check if we need to escape
for i := 0; i < len(text); i++ {
c = text[i]
if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
escapeNeeded = true
startPos = i
break
}
}
if !escapeNeeded {
return append(buf, text...)
}
// copy till first char to escape, iterate the rest
result := append(buf, text[:startPos]...)
for i := startPos; i < len(text); i++ {
c = text[i]
switch c {
case '\\':
result = append(result, '\\', '\\')
case '\n':
result = append(result, '\\', 'n')
case '\r':
result = append(result, '\\', 'r')
case '\t':
result = append(result, '\\', 't')
default:
result = append(result, c)
}
}
return result
}
func mustParse(f string, typ oid.Oid, s []byte) time.Time {
str := string(s)
// Check for a minute and second offset in the timezone.
if typ == oid.T_timestamptz || typ == oid.T_timetz {
for i := 3; i <= 6; i += 3 {
if str[len(str)-i] == ':' {
f += ":00"
continue
}
break
}
}
// Special case for 24:00 time.
// Unfortunately, golang does not parse 24:00 as a proper time.
// In this case, we want to try "round to the next day", to differentiate.
// As such, we find if the 24:00 time matches at the beginning; if so,
// we default it back to 00:00 but add a day later.
var is2400Time bool
switch typ {
case oid.T_timetz, oid.T_time:
if matches := time2400Regex.FindStringSubmatch(str); matches != nil {
// Concatenate timezone information at the back.
str = "00:00:00" + str[len(matches[1]):]
is2400Time = true
}
}
t, err := time.Parse(f, str)
if err != nil {
errorf("decode: %s", err)
}
if is2400Time {
t = t.Add(24 * time.Hour)
}
return t
}
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
// The location cache caches the time zones typically used by the client.
type locationCache struct {
cache map[int]*time.Location
lock sync.Mutex
}
// All connections share the same list of timezones. Benchmarking shows that
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
}
// Returns the cached timezone for the specified offset, creating and caching
// it if necessary.
func (c *locationCache) getLocation(offset int) *time.Location {
c.lock.Lock()
defer c.lock.Unlock()
location, ok := c.cache[offset]
if !ok {
location = time.FixedZone("", offset)
c.cache[offset] = location
}
return location
}
var infinityTsEnabled = false
var infinityTsNegative time.Time
var infinityTsPositive time.Time
const (
infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
}
if !negative.Before(positive) {
panic(infinityTsNegativeMustBeSmaller)
}
infinityTsEnabled = true
infinityTsNegative = negative
infinityTsPositive = positive
}
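// Caller-side sketch: opting in to infinity decoding before any connection is
// opened. The bounds below are arbitrary assumptions; pick values that can
// never collide with real data. Assumes `import ("time"; "github.com/lib/pq")`.
func exampleEnableInfinityTs() {
	pq.EnableInfinityTs(
		time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),   // anything at or before this encodes as -infinity
		time.Date(9999, 12, 31, 0, 0, 0, 0, time.UTC), // anything at or after this encodes as infinity
	)
	// From now on, "infinity"/"-infinity" timestamps scan into these time.Time
	// values instead of causing a Scan error.
}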
/*
* Testing might want to toggle infinityTsEnabled
*/
func disableInfinityTs() {
infinityTsEnabled = false
}
// This is a time function specific to the Postgres default DateStyle
// setting ("ISO, MDY"), the only one we currently support. This
// accounts for the discrepancies between the parsing available with
// time.Parse and the Postgres date formatting quirks.
func parseTs(currentLocation *time.Location, str string) interface{} {
switch str {
case "-infinity":
if infinityTsEnabled {
return infinityTsNegative
}
return []byte(str)
case "infinity":
if infinityTsEnabled {
return infinityTsPositive
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := p.mustAtoi(str, daySep+1, timeSep)
minLen := monSep + len("01-01") + 1
isBC := strings.HasSuffix(str, " BC")
if isBC {
minLen += 3
}
var hour, minute, second int
if len(str) > minLen {
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
// fractional seconds, the time zone offset, and the BC
// designation. We set them up here and adjust the other
// offsets if the preceding sections exist.
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+Z ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' or 'Z' (UTC is +00)
var tzSign int
switch c := str[tzStart]; c {
case '-':
tzSign = -1
case '+':
tzSign = +1
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
} else if tzStart < len(str) && str[tzStart] == 'Z' {
// time zone Z separator indicates UTC is +00
remainderIdx += 1
}
var isoYear int
if isBC {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
globalLocationCache.getLocation(tzOff))
if currentLocation != nil {
// Set the location of the returned Time based on the session's
// TimeZone value, but only if the local time zone database agrees with
// the remote database on the offset.
lt := t.In(currentLocation)
_, newOff := lt.Zone()
if newOff == tzOff {
t = lt
}
}
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
return []byte("-infinity")
}
// t >= infinity : ! (t < infinity)
if !t.Before(infinityTsPositive) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
bc := false
if t.Year() <= 0 {
// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset %= 60
if offset != 0 {
// RFC3339Nano already printed the minus sign
if offset < 0 {
offset = -offset
}
b = append(b, ':')
if offset < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(offset), 10)
}
if bc {
b = append(b, " BC"...)
}
return b
}
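// Caller-side sketch: round-tripping a timestamp through the two exported
// helpers above. The literal is an assumption chosen to exercise fractional
// seconds and an hour-only offset. Assumes
// `import ("fmt"; "time"; "github.com/lib/pq")`.
func exampleTimestampRoundTrip() error {
	t, err := pq.ParseTimestamp(time.UTC, "2006-01-02 15:04:05.123456-07")
	if err != nil {
		return err
	}
	// Prints the value back in Postgres' text format, e.g.
	// "2006-01-02 15:04:05.123456-07:00".
	fmt.Println(string(pq.FormatTimestamp(t)))
	return nil
}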
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte, err error) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
return nil, err
}
} else {
// bytea_output = escape
for len(s) > 0 {
if s[0] == '\\' {
// escaped '\\'
if len(s) >= 2 && s[1] == '\\' {
result = append(result, '\\')
s = s[2:]
continue
}
// '\\' followed by an octal number
if len(s) < 4 {
return nil, fmt.Errorf("invalid bytea sequence %v", s)
}
r, err := strconv.ParseUint(string(s[1:4]), 8, 8)
if err != nil {
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
} else {
// We hit an unescaped, raw byte. Try to read in as many as
// possible in one go.
i := bytes.IndexByte(s, '\\')
if i == -1 {
result = append(result, s...)
break
}
result = append(result, s[:i]...)
s = s[i:]
}
}
}
return result, nil
}
func encodeBytea(serverVersion int, v []byte) (result []byte) {
if serverVersion >= 90000 {
// Use the hex format if we know that the server supports it
result = make([]byte, 2+hex.EncodedLen(len(v)))
result[0] = '\\'
result[1] = 'x'
hex.Encode(result[2:], v)
} else {
// .. or resort to "escape"
for _, b := range v {
if b == '\\' {
result = append(result, '\\', '\\')
} else if b < 0x20 || b > 0x7e {
result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
} else {
result = append(result, b)
}
}
}
return result
}
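// Caller-side sketch: []byte parameters go out through encodeBytea and come
// back through parseBytea, so either server bytea_output format round-trips
// transparently. Assumes an open *sql.DB db, a table blobs(data bytea) and
// `import "database/sql"`; the table name is illustrative only.
func exampleByteaRoundTrip(db *sql.DB) ([]byte, error) {
	in := []byte{0x00, '\'', '\\', 0xff} // bytes that need escaping in either format
	if _, err := db.Exec("INSERT INTO blobs(data) VALUES ($1)", in); err != nil {
		return nil, err
	}
	var out []byte
	err := db.QueryRow("SELECT data FROM blobs LIMIT 1").Scan(&out)
	return out, err
}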
// NullTime represents a time.Time that may be null. NullTime implements the
// sql.Scanner interface so it can be used as a scan destination, similar to
// sql.NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (nt *NullTime) Scan(value interface{}) error {
nt.Time, nt.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (nt NullTime) Value() (driver.Value, error) {
if !nt.Valid {
return nil, nil
}
return nt.Time, nil
}
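// Caller-side sketch: scanning a nullable timestamp column into NullTime and
// passing it back as a parameter. Assumes an open *sql.DB db, a table
// events(id int, seen_at timestamptz NULL) and
// `import ("database/sql"; "fmt"; "github.com/lib/pq")`; names are illustrative.
func exampleNullTime(db *sql.DB) error {
	var seen pq.NullTime
	if err := db.QueryRow("SELECT seen_at FROM events WHERE id = 1").Scan(&seen); err != nil {
		return err
	}
	if seen.Valid {
		fmt.Println("seen at", seen.Time)
	} else {
		fmt.Println("seen_at is NULL")
	}
	// NullTime also implements driver.Valuer, so it can be used as a parameter.
	_, err := db.Exec("UPDATE events SET seen_at = $1 WHERE id = 1", seen)
	return err
}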

523
vendor/github.com/lib/pq/error.go generated vendored Normal file

@ -0,0 +1,523 @@
package pq
import (
"database/sql/driver"
"fmt"
"io"
"net"
"runtime"
)
// Error severities
const (
Efatal = "FATAL"
Epanic = "PANIC"
Ewarning = "WARNING"
Enotice = "NOTICE"
Edebug = "DEBUG"
Einfo = "INFO"
Elog = "LOG"
)
// Error represents an error communicating with the server.
//
// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
type Error struct {
Severity string
Code ErrorCode
Message string
Detail string
Hint string
Position string
InternalPosition string
InternalQuery string
Where string
Schema string
Table string
Column string
DataTypeName string
Constraint string
File string
Line string
Routine string
}
// ErrorCode is a five-character error code.
type ErrorCode string
// Name returns a more human friendly rendering of the error code, namely the
// "condition name".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
func (ec ErrorCode) Name() string {
return errorCodeNames[ec]
}
// ErrorClass is only the class part of an error code.
type ErrorClass string
// Name returns the condition name of an error class. It is equivalent to the
// condition name of the "standard" error code (i.e. the one having the last
// three characters "000").
func (ec ErrorClass) Name() string {
return errorCodeNames[ErrorCode(ec+"000")]
}
// Class returns the error class, e.g. "28".
//
// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
// details.
func (ec ErrorCode) Class() ErrorClass {
return ErrorClass(ec[0:2])
}
// errorCodeNames is a mapping between the five-character error codes and the
// human readable "condition names". It is derived from the list at
// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
var errorCodeNames = map[ErrorCode]string{
// Class 00 - Successful Completion
"00000": "successful_completion",
// Class 01 - Warning
"01000": "warning",
"0100C": "dynamic_result_sets_returned",
"01008": "implicit_zero_bit_padding",
"01003": "null_value_eliminated_in_set_function",
"01007": "privilege_not_granted",
"01006": "privilege_not_revoked",
"01004": "string_data_right_truncation",
"01P01": "deprecated_feature",
// Class 02 - No Data (this is also a warning class per the SQL standard)
"02000": "no_data",
"02001": "no_additional_dynamic_result_sets_returned",
// Class 03 - SQL Statement Not Yet Complete
"03000": "sql_statement_not_yet_complete",
// Class 08 - Connection Exception
"08000": "connection_exception",
"08003": "connection_does_not_exist",
"08006": "connection_failure",
"08001": "sqlclient_unable_to_establish_sqlconnection",
"08004": "sqlserver_rejected_establishment_of_sqlconnection",
"08007": "transaction_resolution_unknown",
"08P01": "protocol_violation",
// Class 09 - Triggered Action Exception
"09000": "triggered_action_exception",
// Class 0A - Feature Not Supported
"0A000": "feature_not_supported",
// Class 0B - Invalid Transaction Initiation
"0B000": "invalid_transaction_initiation",
// Class 0F - Locator Exception
"0F000": "locator_exception",
"0F001": "invalid_locator_specification",
// Class 0L - Invalid Grantor
"0L000": "invalid_grantor",
"0LP01": "invalid_grant_operation",
// Class 0P - Invalid Role Specification
"0P000": "invalid_role_specification",
// Class 0Z - Diagnostics Exception
"0Z000": "diagnostics_exception",
"0Z002": "stacked_diagnostics_accessed_without_active_handler",
// Class 20 - Case Not Found
"20000": "case_not_found",
// Class 21 - Cardinality Violation
"21000": "cardinality_violation",
// Class 22 - Data Exception
"22000": "data_exception",
"2202E": "array_subscript_error",
"22021": "character_not_in_repertoire",
"22008": "datetime_field_overflow",
"22012": "division_by_zero",
"22005": "error_in_assignment",
"2200B": "escape_character_conflict",
"22022": "indicator_overflow",
"22015": "interval_field_overflow",
"2201E": "invalid_argument_for_logarithm",
"22014": "invalid_argument_for_ntile_function",
"22016": "invalid_argument_for_nth_value_function",
"2201F": "invalid_argument_for_power_function",
"2201G": "invalid_argument_for_width_bucket_function",
"22018": "invalid_character_value_for_cast",
"22007": "invalid_datetime_format",
"22019": "invalid_escape_character",
"2200D": "invalid_escape_octet",
"22025": "invalid_escape_sequence",
"22P06": "nonstandard_use_of_escape_character",
"22010": "invalid_indicator_parameter_value",
"22023": "invalid_parameter_value",
"2201B": "invalid_regular_expression",
"2201W": "invalid_row_count_in_limit_clause",
"2201X": "invalid_row_count_in_result_offset_clause",
"22009": "invalid_time_zone_displacement_value",
"2200C": "invalid_use_of_escape_character",
"2200G": "most_specific_type_mismatch",
"22004": "null_value_not_allowed",
"22002": "null_value_no_indicator_parameter",
"22003": "numeric_value_out_of_range",
"2200H": "sequence_generator_limit_exceeded",
"22026": "string_data_length_mismatch",
"22001": "string_data_right_truncation",
"22011": "substring_error",
"22027": "trim_error",
"22024": "unterminated_c_string",
"2200F": "zero_length_character_string",
"22P01": "floating_point_exception",
"22P02": "invalid_text_representation",
"22P03": "invalid_binary_representation",
"22P04": "bad_copy_file_format",
"22P05": "untranslatable_character",
"2200L": "not_an_xml_document",
"2200M": "invalid_xml_document",
"2200N": "invalid_xml_content",
"2200S": "invalid_xml_comment",
"2200T": "invalid_xml_processing_instruction",
// Class 23 - Integrity Constraint Violation
"23000": "integrity_constraint_violation",
"23001": "restrict_violation",
"23502": "not_null_violation",
"23503": "foreign_key_violation",
"23505": "unique_violation",
"23514": "check_violation",
"23P01": "exclusion_violation",
// Class 24 - Invalid Cursor State
"24000": "invalid_cursor_state",
// Class 25 - Invalid Transaction State
"25000": "invalid_transaction_state",
"25001": "active_sql_transaction",
"25002": "branch_transaction_already_active",
"25008": "held_cursor_requires_same_isolation_level",
"25003": "inappropriate_access_mode_for_branch_transaction",
"25004": "inappropriate_isolation_level_for_branch_transaction",
"25005": "no_active_sql_transaction_for_branch_transaction",
"25006": "read_only_sql_transaction",
"25007": "schema_and_data_statement_mixing_not_supported",
"25P01": "no_active_sql_transaction",
"25P02": "in_failed_sql_transaction",
// Class 26 - Invalid SQL Statement Name
"26000": "invalid_sql_statement_name",
// Class 27 - Triggered Data Change Violation
"27000": "triggered_data_change_violation",
// Class 28 - Invalid Authorization Specification
"28000": "invalid_authorization_specification",
"28P01": "invalid_password",
// Class 2B - Dependent Privilege Descriptors Still Exist
"2B000": "dependent_privilege_descriptors_still_exist",
"2BP01": "dependent_objects_still_exist",
// Class 2D - Invalid Transaction Termination
"2D000": "invalid_transaction_termination",
// Class 2F - SQL Routine Exception
"2F000": "sql_routine_exception",
"2F005": "function_executed_no_return_statement",
"2F002": "modifying_sql_data_not_permitted",
"2F003": "prohibited_sql_statement_attempted",
"2F004": "reading_sql_data_not_permitted",
// Class 34 - Invalid Cursor Name
"34000": "invalid_cursor_name",
// Class 38 - External Routine Exception
"38000": "external_routine_exception",
"38001": "containing_sql_not_permitted",
"38002": "modifying_sql_data_not_permitted",
"38003": "prohibited_sql_statement_attempted",
"38004": "reading_sql_data_not_permitted",
// Class 39 - External Routine Invocation Exception
"39000": "external_routine_invocation_exception",
"39001": "invalid_sqlstate_returned",
"39004": "null_value_not_allowed",
"39P01": "trigger_protocol_violated",
"39P02": "srf_protocol_violated",
// Class 3B - Savepoint Exception
"3B000": "savepoint_exception",
"3B001": "invalid_savepoint_specification",
// Class 3D - Invalid Catalog Name
"3D000": "invalid_catalog_name",
// Class 3F - Invalid Schema Name
"3F000": "invalid_schema_name",
// Class 40 - Transaction Rollback
"40000": "transaction_rollback",
"40002": "transaction_integrity_constraint_violation",
"40001": "serialization_failure",
"40003": "statement_completion_unknown",
"40P01": "deadlock_detected",
// Class 42 - Syntax Error or Access Rule Violation
"42000": "syntax_error_or_access_rule_violation",
"42601": "syntax_error",
"42501": "insufficient_privilege",
"42846": "cannot_coerce",
"42803": "grouping_error",
"42P20": "windowing_error",
"42P19": "invalid_recursion",
"42830": "invalid_foreign_key",
"42602": "invalid_name",
"42622": "name_too_long",
"42939": "reserved_name",
"42804": "datatype_mismatch",
"42P18": "indeterminate_datatype",
"42P21": "collation_mismatch",
"42P22": "indeterminate_collation",
"42809": "wrong_object_type",
"42703": "undefined_column",
"42883": "undefined_function",
"42P01": "undefined_table",
"42P02": "undefined_parameter",
"42704": "undefined_object",
"42701": "duplicate_column",
"42P03": "duplicate_cursor",
"42P04": "duplicate_database",
"42723": "duplicate_function",
"42P05": "duplicate_prepared_statement",
"42P06": "duplicate_schema",
"42P07": "duplicate_table",
"42712": "duplicate_alias",
"42710": "duplicate_object",
"42702": "ambiguous_column",
"42725": "ambiguous_function",
"42P08": "ambiguous_parameter",
"42P09": "ambiguous_alias",
"42P10": "invalid_column_reference",
"42611": "invalid_column_definition",
"42P11": "invalid_cursor_definition",
"42P12": "invalid_database_definition",
"42P13": "invalid_function_definition",
"42P14": "invalid_prepared_statement_definition",
"42P15": "invalid_schema_definition",
"42P16": "invalid_table_definition",
"42P17": "invalid_object_definition",
// Class 44 - WITH CHECK OPTION Violation
"44000": "with_check_option_violation",
// Class 53 - Insufficient Resources
"53000": "insufficient_resources",
"53100": "disk_full",
"53200": "out_of_memory",
"53300": "too_many_connections",
"53400": "configuration_limit_exceeded",
// Class 54 - Program Limit Exceeded
"54000": "program_limit_exceeded",
"54001": "statement_too_complex",
"54011": "too_many_columns",
"54023": "too_many_arguments",
// Class 55 - Object Not In Prerequisite State
"55000": "object_not_in_prerequisite_state",
"55006": "object_in_use",
"55P02": "cant_change_runtime_param",
"55P03": "lock_not_available",
// Class 57 - Operator Intervention
"57000": "operator_intervention",
"57014": "query_canceled",
"57P01": "admin_shutdown",
"57P02": "crash_shutdown",
"57P03": "cannot_connect_now",
"57P04": "database_dropped",
// Class 58 - System Error (errors external to PostgreSQL itself)
"58000": "system_error",
"58030": "io_error",
"58P01": "undefined_file",
"58P02": "duplicate_file",
// Class F0 - Configuration File Error
"F0000": "config_file_error",
"F0001": "lock_file_exists",
// Class HV - Foreign Data Wrapper Error (SQL/MED)
"HV000": "fdw_error",
"HV005": "fdw_column_name_not_found",
"HV002": "fdw_dynamic_parameter_value_needed",
"HV010": "fdw_function_sequence_error",
"HV021": "fdw_inconsistent_descriptor_information",
"HV024": "fdw_invalid_attribute_value",
"HV007": "fdw_invalid_column_name",
"HV008": "fdw_invalid_column_number",
"HV004": "fdw_invalid_data_type",
"HV006": "fdw_invalid_data_type_descriptors",
"HV091": "fdw_invalid_descriptor_field_identifier",
"HV00B": "fdw_invalid_handle",
"HV00C": "fdw_invalid_option_index",
"HV00D": "fdw_invalid_option_name",
"HV090": "fdw_invalid_string_length_or_buffer_length",
"HV00A": "fdw_invalid_string_format",
"HV009": "fdw_invalid_use_of_null_pointer",
"HV014": "fdw_too_many_handles",
"HV001": "fdw_out_of_memory",
"HV00P": "fdw_no_schemas",
"HV00J": "fdw_option_name_not_found",
"HV00K": "fdw_reply_handle",
"HV00Q": "fdw_schema_not_found",
"HV00R": "fdw_table_not_found",
"HV00L": "fdw_unable_to_create_execution",
"HV00M": "fdw_unable_to_create_reply",
"HV00N": "fdw_unable_to_establish_connection",
// Class P0 - PL/pgSQL Error
"P0000": "plpgsql_error",
"P0001": "raise_exception",
"P0002": "no_data_found",
"P0003": "too_many_rows",
// Class XX - Internal Error
"XX000": "internal_error",
"XX001": "data_corrupted",
"XX002": "index_corrupted",
}
func parseError(r *readBuf) *Error {
err := new(Error)
for t := r.byte(); t != 0; t = r.byte() {
msg := r.string()
switch t {
case 'S':
err.Severity = msg
case 'C':
err.Code = ErrorCode(msg)
case 'M':
err.Message = msg
case 'D':
err.Detail = msg
case 'H':
err.Hint = msg
case 'P':
err.Position = msg
case 'p':
err.InternalPosition = msg
case 'q':
err.InternalQuery = msg
case 'W':
err.Where = msg
case 's':
err.Schema = msg
case 't':
err.Table = msg
case 'c':
err.Column = msg
case 'd':
err.DataTypeName = msg
case 'n':
err.Constraint = msg
case 'F':
err.File = msg
case 'L':
err.Line = msg
case 'R':
err.Routine = msg
}
}
return err
}
// Fatal returns true if the Error Severity is fatal.
func (err *Error) Fatal() bool {
return err.Severity == Efatal
}
// SQLState returns the SQLState of the error.
func (err *Error) SQLState() string {
return string(err.Code)
}
// Get implements the legacy PGError interface. New code should use the fields
// of the Error struct directly.
func (err *Error) Get(k byte) (v string) {
switch k {
case 'S':
return err.Severity
case 'C':
return string(err.Code)
case 'M':
return err.Message
case 'D':
return err.Detail
case 'H':
return err.Hint
case 'P':
return err.Position
case 'p':
return err.InternalPosition
case 'q':
return err.InternalQuery
case 'W':
return err.Where
case 's':
return err.Schema
case 't':
return err.Table
case 'c':
return err.Column
case 'd':
return err.DataTypeName
case 'n':
return err.Constraint
case 'F':
return err.File
case 'L':
return err.Line
case 'R':
return err.Routine
}
return ""
}
func (err *Error) Error() string {
return "pq: " + err.Message
}
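// Caller-side sketch: classifying a query error by SQLSTATE. The helper name
// is hypothetical. Assumes `import ("errors"; "github.com/lib/pq")`.
func isUniqueViolation(err error) bool {
	var pqErr *pq.Error
	if errors.As(err, &pqErr) {
		// Code is the five-character SQLSTATE; Name() resolves it to the
		// condition name listed in errorCodeNames above.
		return pqErr.Code.Name() == "unique_violation" // SQLSTATE 23505
	}
	return false
}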
// PGError is an interface used by previous versions of pq. It is provided
// only to support legacy code. New code should use the Error type.
type PGError interface {
Error() string
Fatal() bool
Get(k byte) (v string)
}
func errorf(s string, args ...interface{}) {
panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
}
// TODO(ainar-g) Rename to errorf after removing panics.
func fmterrorf(s string, args ...interface{}) error {
return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
}
func errRecoverNoErrBadConn(err *error) {
e := recover()
if e == nil {
// Do nothing
return
}
var ok bool
*err, ok = e.(error)
if !ok {
*err = fmt.Errorf("pq: unexpected error: %#v", e)
}
}
func (cn *conn) errRecover(err *error) {
e := recover()
switch v := e.(type) {
case nil:
// Do nothing
case runtime.Error:
cn.err.set(driver.ErrBadConn)
panic(v)
case *Error:
if v.Fatal() {
*err = driver.ErrBadConn
} else {
*err = v
}
case *net.OpError:
cn.err.set(driver.ErrBadConn)
*err = v
case *safeRetryError:
cn.err.set(driver.ErrBadConn)
*err = driver.ErrBadConn
case error:
if v == io.EOF || v.Error() == "remote error: handshake failure" {
*err = driver.ErrBadConn
} else {
*err = v
}
default:
cn.err.set(driver.ErrBadConn)
panic(fmt.Sprintf("unknown error: %#v", e))
}
// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
// mark the connection bad in database/sql.
if *err == driver.ErrBadConn {
cn.err.set(driver.ErrBadConn)
}
}

27
vendor/github.com/lib/pq/krb.go generated vendored Normal file

@ -0,0 +1,27 @@
package pq
// NewGSSFunc creates a GSS authentication provider, for use with
// RegisterGSSProvider.
type NewGSSFunc func() (GSS, error)
var newGss NewGSSFunc
// RegisterGSSProvider registers a GSS authentication provider. For example, if
// you need to use Kerberos to authenticate with your server, add this to your
// main package:
//
// import "github.com/lib/pq/auth/kerberos"
//
// func init() {
// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() })
// }
func RegisterGSSProvider(newGssArg NewGSSFunc) {
newGss = newGssArg
}
// GSS provides GSSAPI authentication (e.g., Kerberos).
type GSS interface {
GetInitToken(host string, service string) ([]byte, error)
GetInitTokenFromSpn(spn string) ([]byte, error)
Continue(inToken []byte) (done bool, outToken []byte, err error)
}
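// Caller-side sketch: registering a provider. A real deployment would use
// github.com/lib/pq/auth/kerberos as shown in the RegisterGSSProvider comment
// above; the stub below is a placeholder that merely satisfies the interface.
// Assumes `import "github.com/lib/pq"` in the caller's package.
type stubGSS struct{}

func (stubGSS) GetInitToken(host, service string) ([]byte, error) { return nil, nil }
func (stubGSS) GetInitTokenFromSpn(spn string) ([]byte, error)    { return nil, nil }
func (stubGSS) Continue(inToken []byte) (done bool, outToken []byte, err error) {
	return true, nil, nil
}

func exampleRegisterGSS() {
	pq.RegisterGSSProvider(func() (pq.GSS, error) { return stubGSS{}, nil })
}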

72
vendor/github.com/lib/pq/notice.go generated vendored Normal file

@ -0,0 +1,72 @@
//go:build go1.10
// +build go1.10
package pq
import (
"context"
"database/sql/driver"
)
// NoticeHandler returns the notice handler on the given connection, if any. A
// runtime panic occurs if c is not a pq connection. This is rarely used
// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead.
func NoticeHandler(c driver.Conn) func(*Error) {
return c.(*conn).noticeHandler
}
// SetNoticeHandler sets the given notice handler on the given connection. A
// runtime panic occurs if c is not a pq connection. A nil handler may be used
// to unset it. This is rarely used directly, use ConnectorNoticeHandler and
// ConnectorWithNoticeHandler instead.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNoticeHandler(c driver.Conn, handler func(*Error)) {
c.(*conn).noticeHandler = handler
}
// NoticeHandlerConnector wraps a regular connector and sets a notice handler
// on it.
type NoticeHandlerConnector struct {
driver.Connector
noticeHandler func(*Error)
}
// Connect calls the underlying connector's connect method and then sets the
// notice handler.
func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
c, err := n.Connector.Connect(ctx)
if err == nil {
SetNoticeHandler(c, n.noticeHandler)
}
return c, err
}
// ConnectorNoticeHandler returns the currently set notice handler, if any. If
// the given connector is not a result of ConnectorWithNoticeHandler, nil is
// returned.
func ConnectorNoticeHandler(c driver.Connector) func(*Error) {
if c, ok := c.(*NoticeHandlerConnector); ok {
return c.noticeHandler
}
return nil
}
// ConnectorWithNoticeHandler creates or sets the given handler for the given
// connector. If the given connector is a result of calling this function
// previously, it is simply set on the given connector and returned. Otherwise,
// this returns a new connector wrapping the given one and setting the notice
// handler. A nil notice handler may be used to unset it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notice handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector {
if c, ok := c.(*NoticeHandlerConnector); ok {
c.noticeHandler = handler
return c
}
return &NoticeHandlerConnector{Connector: c, noticeHandler: handler}
}
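// Caller-side sketch: surfacing server notices (RAISE NOTICE, etc.) via a
// connector. The DSN is an assumption. Assumes
// `import ("database/sql"; "log"; "github.com/lib/pq")`.
func exampleOpenWithNoticeHandler() (*sql.DB, error) {
	base, err := pq.NewConnector("postgres://user:password@localhost/appdb?sslmode=disable")
	if err != nil {
		return nil, err
	}
	connector := pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) {
		log.Printf("notice (%s): %s", notice.Severity, notice.Message)
	})
	return sql.OpenDB(connector), nil
}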

858
vendor/github.com/lib/pq/notify.go generated vendored Normal file
View File

@ -0,0 +1,858 @@
package pq
// Package pq is a pure Go Postgres driver for the database/sql package.
// This module contains support for Postgres LISTEN/NOTIFY.
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// Notification represents a single notification from the database.
type Notification struct {
// Process ID (PID) of the notifying postgres backend.
BePid int
// Name of the channel the notification was sent on.
Channel string
// Payload, or the empty string if unspecified.
Extra string
}
func recvNotification(r *readBuf) *Notification {
bePid := r.int32()
channel := r.string()
extra := r.string()
return &Notification{bePid, channel, extra}
}
// SetNotificationHandler sets the given notification handler on the given
// connection. A runtime panic occurs if c is not a pq connection. A nil handler
// may be used to unset it.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func SetNotificationHandler(c driver.Conn, handler func(*Notification)) {
c.(*conn).notificationHandler = handler
}
// NotificationHandlerConnector wraps a regular connector and sets a notification handler
// on it.
type NotificationHandlerConnector struct {
driver.Connector
notificationHandler func(*Notification)
}
// Connect calls the underlying connector's connect method and then sets the
// notification handler.
func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) {
c, err := n.Connector.Connect(ctx)
if err == nil {
SetNotificationHandler(c, n.notificationHandler)
}
return c, err
}
// ConnectorNotificationHandler returns the currently set notification handler, if any. If
// the given connector is not a result of ConnectorWithNotificationHandler, nil is
// returned.
func ConnectorNotificationHandler(c driver.Connector) func(*Notification) {
if c, ok := c.(*NotificationHandlerConnector); ok {
return c.notificationHandler
}
return nil
}
// ConnectorWithNotificationHandler creates or sets the given handler for the given
// connector. If the given connector is a result of calling this function
// previously, it is simply set on the given connector and returned. Otherwise,
// this returns a new connector wrapping the given one and setting the notification
// handler. A nil notification handler may be used to unset it.
//
// The returned connector is intended to be used with database/sql.OpenDB.
//
// Note: Notification handlers are executed synchronously by pq meaning commands
// won't continue to be processed until the handler returns.
func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector {
if c, ok := c.(*NotificationHandlerConnector); ok {
c.notificationHandler = handler
return c
}
return &NotificationHandlerConnector{Connector: c, notificationHandler: handler}
}
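// Caller-side sketch for the connector-level handler; most applications use
// Listener (below) instead, since it also manages reconnects. The DSN is an
// assumption. Assumes `import ("database/sql"; "log"; "github.com/lib/pq")`.
func exampleOpenWithNotificationHandler() (*sql.DB, error) {
	base, err := pq.NewConnector("postgres://user:password@localhost/appdb?sslmode=disable")
	if err != nil {
		return nil, err
	}
	connector := pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
		log.Printf("NOTIFY on %q: %s", n.Channel, n.Extra)
	})
	return sql.OpenDB(connector), nil
}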
const (
connStateIdle int32 = iota
connStateExpectResponse
connStateExpectReadyForQuery
)
type message struct {
typ byte
err error
}
var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
// ListenerConn is a low-level interface for waiting for notifications. You
// should use Listener instead.
type ListenerConn struct {
// guards cn and err
connectionLock sync.Mutex
cn *conn
err error
connState int32
// the sending goroutine will be holding this lock
senderLock sync.Mutex
notificationChan chan<- *Notification
replyChan chan message
}
// NewListenerConn creates a new ListenerConn. Use NewListener instead.
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
return newDialListenerConn(defaultDialer{}, name, notificationChan)
}
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
cn, err := DialOpen(d, name)
if err != nil {
return nil, err
}
l := &ListenerConn{
cn: cn.(*conn),
notificationChan: c,
connState: connStateIdle,
replyChan: make(chan message, 2),
}
go l.listenerConnMain()
return l, nil
}
// We can only allow one goroutine at a time to be running a query on the
// connection for various reasons, so the goroutine sending on the connection
// must be holding senderLock.
//
// Returns an error if an unrecoverable error has occurred and the ListenerConn
// should be abandoned.
func (l *ListenerConn) acquireSenderLock() error {
// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
l.senderLock.Lock()
l.connectionLock.Lock()
err := l.err
l.connectionLock.Unlock()
if err != nil {
l.senderLock.Unlock()
return err
}
return nil
}
func (l *ListenerConn) releaseSenderLock() {
l.senderLock.Unlock()
}
// setState advances the protocol state to newState. Returns false if moving
// to that state from the current state is not allowed.
func (l *ListenerConn) setState(newState int32) bool {
var expectedState int32
switch newState {
case connStateIdle:
expectedState = connStateExpectReadyForQuery
case connStateExpectResponse:
expectedState = connStateIdle
case connStateExpectReadyForQuery:
expectedState = connStateExpectResponse
default:
panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
}
return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
}
// Main logic is here: receive messages from the postgres backend, forward
// notifications and query replies and keep the internal state in sync with the
// protocol state. Returns when the connection has been lost, is about to go
// away or should be discarded because we couldn't agree on the state with the
// server backend.
func (l *ListenerConn) listenerConnLoop() (err error) {
defer errRecoverNoErrBadConn(&err)
r := &readBuf{}
for {
t, err := l.cn.recvMessage(r)
if err != nil {
return err
}
switch t {
case 'A':
// recvNotification copies all the data so we don't need to worry
// about the scratch buffer being overwritten.
l.notificationChan <- recvNotification(r)
case 'T', 'D':
// only used by tests; ignore
case 'E':
// We might receive an ErrorResponse even when not in a query; it
// is expected that the server will close the connection after
// that, but we should make sure that the error we display is the
// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
if !l.setState(connStateExpectReadyForQuery) {
return parseError(r)
}
l.replyChan <- message{t, parseError(r)}
case 'C', 'I':
if !l.setState(connStateExpectReadyForQuery) {
// protocol out of sync
return fmt.Errorf("unexpected CommandComplete")
}
// ExecSimpleQuery doesn't need to know about this message
case 'Z':
if !l.setState(connStateIdle) {
// protocol out of sync
return fmt.Errorf("unexpected ReadyForQuery")
}
l.replyChan <- message{t, nil}
case 'S':
// ignore
case 'N':
if n := l.cn.noticeHandler; n != nil {
n(parseError(r))
}
default:
return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
}
}
}
// This is the main routine for the goroutine receiving on the database
// connection. Most of the main logic is in listenerConnLoop.
func (l *ListenerConn) listenerConnMain() {
err := l.listenerConnLoop()
// listenerConnLoop terminated; we're done, but we still have to clean up.
// Make sure nobody tries to start any new queries by making sure the err
// pointer is set. It is important that we do not overwrite its value; a
// connection could be closed by either this goroutine or one sending on
// the connection -- whoever closes the connection is assumed to have the
// more meaningful error message (as the other one will probably get
// net.errClosed), so that goroutine sets the error we expose while the
// other error is discarded. If the connection is lost while two
// goroutines are operating on the socket, it probably doesn't matter which
// error we expose so we don't try to do anything more complex.
l.connectionLock.Lock()
if l.err == nil {
l.err = err
}
l.cn.Close()
l.connectionLock.Unlock()
// There might be a query in-flight; make sure nobody's waiting for a
// response to it, since there's not going to be one.
close(l.replyChan)
// let the listener know we're done
close(l.notificationChan)
// this ListenerConn is done
}
// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Listen(channel string) (bool, error) {
return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
}
// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
func (l *ListenerConn) Unlisten(channel string) (bool, error) {
return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
}
// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
func (l *ListenerConn) UnlistenAll() (bool, error) {
return l.ExecSimpleQuery("UNLISTEN *")
}
// Ping the remote server to make sure it's alive. Non-nil error means the
// connection has failed and should be abandoned.
func (l *ListenerConn) Ping() error {
sent, err := l.ExecSimpleQuery("")
if !sent {
return err
}
if err != nil {
// shouldn't happen
panic(err)
}
return nil
}
// Attempt to send a query on the connection. Returns an error if sending the
// query failed, and the caller should initiate closure of this connection.
// The caller must be holding senderLock (see acquireSenderLock and
// releaseSenderLock).
func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
defer errRecoverNoErrBadConn(&err)
// must set connection state before sending the query
if !l.setState(connStateExpectResponse) {
panic("two queries running at the same time")
}
// Can't use l.cn.writeBuf here because it uses the scratch buffer which
// might get overwritten by listenerConnLoop.
b := &writeBuf{
buf: []byte("Q\x00\x00\x00\x00"),
pos: 1,
}
b.string(q)
l.cn.send(b)
return nil
}
// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
// parameters) on the connection. The possible return values are:
// 1) "executed" is true; the query was executed to completion on the
// database server. If the query failed, err will be set to the error
// returned by the database, otherwise err will be nil.
// 2) If "executed" is false, the query could not be executed on the remote
// server. err will be non-nil.
//
// After a call to ExecSimpleQuery has returned an executed=false value, the
// connection has either been closed or will be closed shortly thereafter, and
// all subsequently executed queries will return an error.
func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
if err = l.acquireSenderLock(); err != nil {
return false, err
}
defer l.releaseSenderLock()
err = l.sendSimpleQuery(q)
if err != nil {
// We can't know what state the protocol is in, so we need to abandon
// this connection.
l.connectionLock.Lock()
// Set the error pointer if it hasn't been set already; see
// listenerConnMain.
if l.err == nil {
l.err = err
}
l.connectionLock.Unlock()
l.cn.c.Close()
return false, err
}
// now we just wait for a reply..
for {
m, ok := <-l.replyChan
if !ok {
// We lost the connection to the server; don't bother waiting for
// a response. err should have been set already.
l.connectionLock.Lock()
err := l.err
l.connectionLock.Unlock()
return false, err
}
switch m.typ {
case 'Z':
// sanity check
if m.err != nil {
panic("m.err != nil")
}
// done; err might or might not be set
return true, err
case 'E':
// sanity check
if m.err == nil {
panic("m.err == nil")
}
// server responded with an error; ReadyForQuery to follow
err = m.err
default:
return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
}
}
}
// Close closes the connection.
func (l *ListenerConn) Close() error {
l.connectionLock.Lock()
if l.err != nil {
l.connectionLock.Unlock()
return errListenerConnClosed
}
l.err = errListenerConnClosed
l.connectionLock.Unlock()
// We can't send anything on the connection without holding senderLock.
// Simply close the net.Conn to wake up everyone operating on it.
return l.cn.c.Close()
}
// Err returns the reason the connection was closed. It is not safe to call
// this function until l.Notify has been closed.
func (l *ListenerConn) Err() error {
return l.err
}
var errListenerClosed = errors.New("pq: Listener has been closed")
// ErrChannelAlreadyOpen is returned from Listen when a channel is already
// open.
var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
var ErrChannelNotOpen = errors.New("pq: channel is not open")
// ListenerEventType is an enumeration of listener event types.
type ListenerEventType int
const (
// ListenerEventConnected is emitted only when the database connection
// has been initially initialized. The err argument of the callback
// will always be nil.
ListenerEventConnected ListenerEventType = iota
// ListenerEventDisconnected is emitted after a database connection has
// been lost, either because of an error or because Close has been
// called. The err argument will be set to the reason the database
// connection was lost.
ListenerEventDisconnected
// ListenerEventReconnected is emitted after a database connection has
// been re-established after connection loss. The err argument of the
// callback will always be nil. After this event has been emitted, a
// nil pq.Notification is sent on the Listener.Notify channel.
ListenerEventReconnected
// ListenerEventConnectionAttemptFailed is emitted after a connection
// to the database was attempted, but failed. The err argument will be
// set to an error describing why the connection attempt did not
// succeed.
ListenerEventConnectionAttemptFailed
)
// EventCallbackType is the event callback type. See also ListenerEventType
// constants' documentation.
type EventCallbackType func(event ListenerEventType, err error)
// Listener provides an interface for listening to notifications from a
// PostgreSQL database. For general usage information, see section
// "Notifications".
//
// Listener can safely be used from concurrently running goroutines.
type Listener struct {
// Channel for receiving notifications from the database. In some cases a
// nil value will be sent. See section "Notifications" above.
Notify chan *Notification
name string
minReconnectInterval time.Duration
maxReconnectInterval time.Duration
dialer Dialer
eventCallback EventCallbackType
lock sync.Mutex
isClosed bool
reconnectCond *sync.Cond
cn *ListenerConn
connNotificationChan <-chan *Notification
channels map[string]struct{}
}
// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
//
// name should be set to a connection string to be used to establish the
// database connection (see section "Connection String Parameters" above).
//
// minReconnectInterval controls the duration to wait before trying to
// re-establish the database connection after connection loss. After each
// consecutive failure this interval is doubled, until maxReconnectInterval is
// reached. Successfully completing the connection establishment procedure
// resets the interval back to minReconnectInterval.
//
// The last parameter eventCallback can be set to a function which will be
// called by the Listener when the state of the underlying database connection
// changes. This callback will be called by the goroutine which dispatches the
// notifications over the Notify channel, so you should try to avoid doing
// potentially time-consuming operations from the callback.
func NewListener(name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
}
// NewDialListener is like NewListener but it takes a Dialer.
func NewDialListener(d Dialer,
name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
l := &Listener{
name: name,
minReconnectInterval: minReconnectInterval,
maxReconnectInterval: maxReconnectInterval,
dialer: d,
eventCallback: eventCallback,
channels: make(map[string]struct{}),
Notify: make(chan *Notification, 32),
}
l.reconnectCond = sync.NewCond(&l.lock)
go l.listenerMain()
return l
}
// NotificationChannel returns the notification channel for this listener.
// This is the same channel as Notify, and will not be recreated during the
// life time of the Listener.
func (l *Listener) NotificationChannel() <-chan *Notification {
return l.Notify
}
// Listen starts listening for notifications on a channel. Calls to this
// function will block until an acknowledgement has been received from the
// server. Note that Listener automatically re-establishes the connection
// after connection loss, so this function may block indefinitely if the
// connection can not be re-established.
//
// Listen will only fail in three conditions:
// 1) The channel is already open. The returned error will be
// ErrChannelAlreadyOpen.
// 2) The query was executed on the remote server, but PostgreSQL returned an
// error message in response to the query. The returned error will be a
// pq.Error containing the information the server supplied.
// 3) Close is called on the Listener before the request could be completed.
//
// The channel name is case-sensitive.
func (l *Listener) Listen(channel string) error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
// The server allows you to issue a LISTEN on a channel which is already
// open, but it seems useful to be able to detect this case to spot
// mistakes in application logic. If the application genuinely doesn't
// care, it can check the exported error and ignore it.
_, exists := l.channels[channel]
if exists {
return ErrChannelAlreadyOpen
}
if l.cn != nil {
// If gotResponse is true but error is set, the query was executed on
// the remote server, but resulted in an error. This should be
// relatively rare, so it's fine if we just pass the error to our
// caller. However, if gotResponse is false, we could not complete the
// query on the remote server and our underlying connection is about
// to go away, so we only add relname to l.channels, and wait for
// resync() to take care of the rest.
gotResponse, err := l.cn.Listen(channel)
if gotResponse && err != nil {
return err
}
}
l.channels[channel] = struct{}{}
for l.cn == nil {
l.reconnectCond.Wait()
// we let go of the mutex for a while
if l.isClosed {
return errListenerClosed
}
}
return nil
}
// Unlisten removes a channel from the Listener's channel list. Returns
// ErrChannelNotOpen if the Listener is not listening on the specified channel.
// Returns immediately with no error if there is no connection. Note that you
// might still get notifications for this channel even after Unlisten has
// returned.
//
// The channel name is case-sensitive.
func (l *Listener) Unlisten(channel string) error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
// Similarly to LISTEN, this is not an error in Postgres, but it seems
// useful to distinguish from the normal conditions.
_, exists := l.channels[channel]
if !exists {
return ErrChannelNotOpen
}
if l.cn != nil {
// Similarly to Listen (see comment in that function), the caller
// should only be bothered with an error if it came from the backend as
// a response to our query.
gotResponse, err := l.cn.Unlisten(channel)
if gotResponse && err != nil {
return err
}
}
// Don't bother waiting for resync if there's no connection.
delete(l.channels, channel)
return nil
}
// UnlistenAll removes all channels from the Listener's channel list. Returns
// immediately with no error if there is no connection. Note that you might
// still get notifications for any of the deleted channels even after
// UnlistenAll has returned.
func (l *Listener) UnlistenAll() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn != nil {
// Similarly to Listen (see comment in that function), the caller
// should only be bothered with an error if it came from the backend as
// a response to our query.
gotResponse, err := l.cn.UnlistenAll()
if gotResponse && err != nil {
return err
}
}
// Don't bother waiting for resync if there's no connection.
l.channels = make(map[string]struct{})
return nil
}
// Ping the remote server to make sure it's alive. Non-nil return value means
// that there is no active connection.
func (l *Listener) Ping() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn == nil {
return errors.New("no connection")
}
return l.cn.Ping()
}
// Clean up after losing the server connection. Returns l.cn.Err(), which
// should have the reason the connection was lost.
func (l *Listener) disconnectCleanup() error {
l.lock.Lock()
defer l.lock.Unlock()
// sanity check; can't look at Err() until the channel has been closed
select {
case _, ok := <-l.connNotificationChan:
if ok {
panic("connNotificationChan not closed")
}
default:
panic("connNotificationChan not closed")
}
err := l.cn.Err()
l.cn.Close()
l.cn = nil
return err
}
// Synchronize the list of channels we want to be listening on with the server
// after the connection has been established.
func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
doneChan := make(chan error)
go func(notificationChan <-chan *Notification) {
for channel := range l.channels {
// If we got a response, return that error to our caller as it's
// going to be more descriptive than cn.Err().
gotResponse, err := cn.Listen(channel)
if gotResponse && err != nil {
doneChan <- err
return
}
// If we couldn't reach the server, wait for notificationChan to
// close and then return the error message from the connection, as
// per ListenerConn's interface.
if err != nil {
for range notificationChan {
}
doneChan <- cn.Err()
return
}
}
doneChan <- nil
}(notificationChan)
// Ignore notifications while synchronization is going on to avoid
// deadlocks. We have to send a nil notification over Notify anyway as
// we can't possibly know which notifications (if any) were lost while
// the connection was down, so there's no reason to try and process
// these messages at all.
for {
select {
case _, ok := <-notificationChan:
if !ok {
notificationChan = nil
}
case err := <-doneChan:
return err
}
}
}
// caller should NOT be holding l.lock
func (l *Listener) closed() bool {
l.lock.Lock()
defer l.lock.Unlock()
return l.isClosed
}
func (l *Listener) connect() error {
notificationChan := make(chan *Notification, 32)
cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
if err != nil {
return err
}
l.lock.Lock()
defer l.lock.Unlock()
err = l.resync(cn, notificationChan)
if err != nil {
cn.Close()
return err
}
l.cn = cn
l.connNotificationChan = notificationChan
l.reconnectCond.Broadcast()
return nil
}
// Close disconnects the Listener from the database and shuts it down.
// Subsequent calls to its methods will return an error. Close returns an
// error if the connection has already been closed.
func (l *Listener) Close() error {
l.lock.Lock()
defer l.lock.Unlock()
if l.isClosed {
return errListenerClosed
}
if l.cn != nil {
l.cn.Close()
}
l.isClosed = true
// Unblock calls to Listen()
l.reconnectCond.Broadcast()
return nil
}
func (l *Listener) emitEvent(event ListenerEventType, err error) {
if l.eventCallback != nil {
l.eventCallback(event, err)
}
}
// Main logic here: maintain a connection to the server when possible, wait
// for notifications and emit events.
func (l *Listener) listenerConnLoop() {
var nextReconnect time.Time
reconnectInterval := l.minReconnectInterval
for {
for {
err := l.connect()
if err == nil {
break
}
if l.closed() {
return
}
l.emitEvent(ListenerEventConnectionAttemptFailed, err)
time.Sleep(reconnectInterval)
reconnectInterval *= 2
if reconnectInterval > l.maxReconnectInterval {
reconnectInterval = l.maxReconnectInterval
}
}
if nextReconnect.IsZero() {
l.emitEvent(ListenerEventConnected, nil)
} else {
l.emitEvent(ListenerEventReconnected, nil)
l.Notify <- nil
}
reconnectInterval = l.minReconnectInterval
nextReconnect = time.Now().Add(reconnectInterval)
for {
notification, ok := <-l.connNotificationChan
if !ok {
// lost connection, loop again
break
}
l.Notify <- notification
}
err := l.disconnectCleanup()
if l.closed() {
return
}
l.emitEvent(ListenerEventDisconnected, err)
time.Sleep(time.Until(nextReconnect))
}
}
func (l *Listener) listenerMain() {
l.listenerConnLoop()
close(l.Notify)
}
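// Caller-side sketch: the typical LISTEN/NOTIFY consumer built on Listener.
// The DSN, channel name and intervals are assumptions. Assumes
// `import ("log"; "time"; "github.com/lib/pq")`.
func exampleListen() {
	l := pq.NewListener(
		"postgres://user:password@localhost/appdb?sslmode=disable",
		10*time.Second, // minReconnectInterval
		time.Minute,    // maxReconnectInterval
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Printf("listener event %v: %v", ev, err)
			}
		})
	defer l.Close()
	if err := l.Listen("task_updates"); err != nil {
		log.Print(err)
		return
	}
	for {
		select {
		case n := <-l.Notify:
			if n == nil {
				// A nil notification follows a reconnect; notifications may
				// have been lost, so re-check application state here.
				continue
			}
			log.Printf("NOTIFY on %q: %s", n.Channel, n.Extra)
		case <-time.After(90 * time.Second):
			// Periodically verify the connection is still alive.
			go l.Ping()
		}
	}
}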

6
vendor/github.com/lib/pq/oid/doc.go generated vendored Normal file

@ -0,0 +1,6 @@
// Package oid contains OID constants
// as defined by the Postgres server.
package oid
// Oid is a Postgres Object ID.
type Oid uint32
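// Caller-side sketch: resolving an Oid to its type name via the generated
// TypeName map (see types.go below). Assumes
// `import ("fmt"; "github.com/lib/pq/oid")`.
func exampleOidName() {
	fmt.Println(oid.TypeName[oid.T_timestamptz]) // prints "TIMESTAMPTZ"
}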

343
vendor/github.com/lib/pq/oid/types.go generated vendored Normal file

@ -0,0 +1,343 @@
// Code generated by gen.go. DO NOT EDIT.
package oid
const (
T_bool Oid = 16
T_bytea Oid = 17
T_char Oid = 18
T_name Oid = 19
T_int8 Oid = 20
T_int2 Oid = 21
T_int2vector Oid = 22
T_int4 Oid = 23
T_regproc Oid = 24
T_text Oid = 25
T_oid Oid = 26
T_tid Oid = 27
T_xid Oid = 28
T_cid Oid = 29
T_oidvector Oid = 30
T_pg_ddl_command Oid = 32
T_pg_type Oid = 71
T_pg_attribute Oid = 75
T_pg_proc Oid = 81
T_pg_class Oid = 83
T_json Oid = 114
T_xml Oid = 142
T__xml Oid = 143
T_pg_node_tree Oid = 194
T__json Oid = 199
T_smgr Oid = 210
T_index_am_handler Oid = 325
T_point Oid = 600
T_lseg Oid = 601
T_path Oid = 602
T_box Oid = 603
T_polygon Oid = 604
T_line Oid = 628
T__line Oid = 629
T_cidr Oid = 650
T__cidr Oid = 651
T_float4 Oid = 700
T_float8 Oid = 701
T_abstime Oid = 702
T_reltime Oid = 703
T_tinterval Oid = 704
T_unknown Oid = 705
T_circle Oid = 718
T__circle Oid = 719
T_money Oid = 790
T__money Oid = 791
T_macaddr Oid = 829
T_inet Oid = 869
T__bool Oid = 1000
T__bytea Oid = 1001
T__char Oid = 1002
T__name Oid = 1003
T__int2 Oid = 1005
T__int2vector Oid = 1006
T__int4 Oid = 1007
T__regproc Oid = 1008
T__text Oid = 1009
T__tid Oid = 1010
T__xid Oid = 1011
T__cid Oid = 1012
T__oidvector Oid = 1013
T__bpchar Oid = 1014
T__varchar Oid = 1015
T__int8 Oid = 1016
T__point Oid = 1017
T__lseg Oid = 1018
T__path Oid = 1019
T__box Oid = 1020
T__float4 Oid = 1021
T__float8 Oid = 1022
T__abstime Oid = 1023
T__reltime Oid = 1024
T__tinterval Oid = 1025
T__polygon Oid = 1027
T__oid Oid = 1028
T_aclitem Oid = 1033
T__aclitem Oid = 1034
T__macaddr Oid = 1040
T__inet Oid = 1041
T_bpchar Oid = 1042
T_varchar Oid = 1043
T_date Oid = 1082
T_time Oid = 1083
T_timestamp Oid = 1114
T__timestamp Oid = 1115
T__date Oid = 1182
T__time Oid = 1183
T_timestamptz Oid = 1184
T__timestamptz Oid = 1185
T_interval Oid = 1186
T__interval Oid = 1187
T__numeric Oid = 1231
T_pg_database Oid = 1248
T__cstring Oid = 1263
T_timetz Oid = 1266
T__timetz Oid = 1270
T_bit Oid = 1560
T__bit Oid = 1561
T_varbit Oid = 1562
T__varbit Oid = 1563
T_numeric Oid = 1700
T_refcursor Oid = 1790
T__refcursor Oid = 2201
T_regprocedure Oid = 2202
T_regoper Oid = 2203
T_regoperator Oid = 2204
T_regclass Oid = 2205
T_regtype Oid = 2206
T__regprocedure Oid = 2207
T__regoper Oid = 2208
T__regoperator Oid = 2209
T__regclass Oid = 2210
T__regtype Oid = 2211
T_record Oid = 2249
T_cstring Oid = 2275
T_any Oid = 2276
T_anyarray Oid = 2277
T_void Oid = 2278
T_trigger Oid = 2279
T_language_handler Oid = 2280
T_internal Oid = 2281
T_opaque Oid = 2282
T_anyelement Oid = 2283
T__record Oid = 2287
T_anynonarray Oid = 2776
T_pg_authid Oid = 2842
T_pg_auth_members Oid = 2843
T__txid_snapshot Oid = 2949
T_uuid Oid = 2950
T__uuid Oid = 2951
T_txid_snapshot Oid = 2970
T_fdw_handler Oid = 3115
T_pg_lsn Oid = 3220
T__pg_lsn Oid = 3221
T_tsm_handler Oid = 3310
T_anyenum Oid = 3500
T_tsvector Oid = 3614
T_tsquery Oid = 3615
T_gtsvector Oid = 3642
T__tsvector Oid = 3643
T__gtsvector Oid = 3644
T__tsquery Oid = 3645
T_regconfig Oid = 3734
T__regconfig Oid = 3735
T_regdictionary Oid = 3769
T__regdictionary Oid = 3770
T_jsonb Oid = 3802
T__jsonb Oid = 3807
T_anyrange Oid = 3831
T_event_trigger Oid = 3838
T_int4range Oid = 3904
T__int4range Oid = 3905
T_numrange Oid = 3906
T__numrange Oid = 3907
T_tsrange Oid = 3908
T__tsrange Oid = 3909
T_tstzrange Oid = 3910
T__tstzrange Oid = 3911
T_daterange Oid = 3912
T__daterange Oid = 3913
T_int8range Oid = 3926
T__int8range Oid = 3927
T_pg_shseclabel Oid = 4066
T_regnamespace Oid = 4089
T__regnamespace Oid = 4090
T_regrole Oid = 4096
T__regrole Oid = 4097
)
var TypeName = map[Oid]string{
T_bool: "BOOL",
T_bytea: "BYTEA",
T_char: "CHAR",
T_name: "NAME",
T_int8: "INT8",
T_int2: "INT2",
T_int2vector: "INT2VECTOR",
T_int4: "INT4",
T_regproc: "REGPROC",
T_text: "TEXT",
T_oid: "OID",
T_tid: "TID",
T_xid: "XID",
T_cid: "CID",
T_oidvector: "OIDVECTOR",
T_pg_ddl_command: "PG_DDL_COMMAND",
T_pg_type: "PG_TYPE",
T_pg_attribute: "PG_ATTRIBUTE",
T_pg_proc: "PG_PROC",
T_pg_class: "PG_CLASS",
T_json: "JSON",
T_xml: "XML",
T__xml: "_XML",
T_pg_node_tree: "PG_NODE_TREE",
T__json: "_JSON",
T_smgr: "SMGR",
T_index_am_handler: "INDEX_AM_HANDLER",
T_point: "POINT",
T_lseg: "LSEG",
T_path: "PATH",
T_box: "BOX",
T_polygon: "POLYGON",
T_line: "LINE",
T__line: "_LINE",
T_cidr: "CIDR",
T__cidr: "_CIDR",
T_float4: "FLOAT4",
T_float8: "FLOAT8",
T_abstime: "ABSTIME",
T_reltime: "RELTIME",
T_tinterval: "TINTERVAL",
T_unknown: "UNKNOWN",
T_circle: "CIRCLE",
T__circle: "_CIRCLE",
T_money: "MONEY",
T__money: "_MONEY",
T_macaddr: "MACADDR",
T_inet: "INET",
T__bool: "_BOOL",
T__bytea: "_BYTEA",
T__char: "_CHAR",
T__name: "_NAME",
T__int2: "_INT2",
T__int2vector: "_INT2VECTOR",
T__int4: "_INT4",
T__regproc: "_REGPROC",
T__text: "_TEXT",
T__tid: "_TID",
T__xid: "_XID",
T__cid: "_CID",
T__oidvector: "_OIDVECTOR",
T__bpchar: "_BPCHAR",
T__varchar: "_VARCHAR",
T__int8: "_INT8",
T__point: "_POINT",
T__lseg: "_LSEG",
T__path: "_PATH",
T__box: "_BOX",
T__float4: "_FLOAT4",
T__float8: "_FLOAT8",
T__abstime: "_ABSTIME",
T__reltime: "_RELTIME",
T__tinterval: "_TINTERVAL",
T__polygon: "_POLYGON",
T__oid: "_OID",
T_aclitem: "ACLITEM",
T__aclitem: "_ACLITEM",
T__macaddr: "_MACADDR",
T__inet: "_INET",
T_bpchar: "BPCHAR",
T_varchar: "VARCHAR",
T_date: "DATE",
T_time: "TIME",
T_timestamp: "TIMESTAMP",
T__timestamp: "_TIMESTAMP",
T__date: "_DATE",
T__time: "_TIME",
T_timestamptz: "TIMESTAMPTZ",
T__timestamptz: "_TIMESTAMPTZ",
T_interval: "INTERVAL",
T__interval: "_INTERVAL",
T__numeric: "_NUMERIC",
T_pg_database: "PG_DATABASE",
T__cstring: "_CSTRING",
T_timetz: "TIMETZ",
T__timetz: "_TIMETZ",
T_bit: "BIT",
T__bit: "_BIT",
T_varbit: "VARBIT",
T__varbit: "_VARBIT",
T_numeric: "NUMERIC",
T_refcursor: "REFCURSOR",
T__refcursor: "_REFCURSOR",
T_regprocedure: "REGPROCEDURE",
T_regoper: "REGOPER",
T_regoperator: "REGOPERATOR",
T_regclass: "REGCLASS",
T_regtype: "REGTYPE",
T__regprocedure: "_REGPROCEDURE",
T__regoper: "_REGOPER",
T__regoperator: "_REGOPERATOR",
T__regclass: "_REGCLASS",
T__regtype: "_REGTYPE",
T_record: "RECORD",
T_cstring: "CSTRING",
T_any: "ANY",
T_anyarray: "ANYARRAY",
T_void: "VOID",
T_trigger: "TRIGGER",
T_language_handler: "LANGUAGE_HANDLER",
T_internal: "INTERNAL",
T_opaque: "OPAQUE",
T_anyelement: "ANYELEMENT",
T__record: "_RECORD",
T_anynonarray: "ANYNONARRAY",
T_pg_authid: "PG_AUTHID",
T_pg_auth_members: "PG_AUTH_MEMBERS",
T__txid_snapshot: "_TXID_SNAPSHOT",
T_uuid: "UUID",
T__uuid: "_UUID",
T_txid_snapshot: "TXID_SNAPSHOT",
T_fdw_handler: "FDW_HANDLER",
T_pg_lsn: "PG_LSN",
T__pg_lsn: "_PG_LSN",
T_tsm_handler: "TSM_HANDLER",
T_anyenum: "ANYENUM",
T_tsvector: "TSVECTOR",
T_tsquery: "TSQUERY",
T_gtsvector: "GTSVECTOR",
T__tsvector: "_TSVECTOR",
T__gtsvector: "_GTSVECTOR",
T__tsquery: "_TSQUERY",
T_regconfig: "REGCONFIG",
T__regconfig: "_REGCONFIG",
T_regdictionary: "REGDICTIONARY",
T__regdictionary: "_REGDICTIONARY",
T_jsonb: "JSONB",
T__jsonb: "_JSONB",
T_anyrange: "ANYRANGE",
T_event_trigger: "EVENT_TRIGGER",
T_int4range: "INT4RANGE",
T__int4range: "_INT4RANGE",
T_numrange: "NUMRANGE",
T__numrange: "_NUMRANGE",
T_tsrange: "TSRANGE",
T__tsrange: "_TSRANGE",
T_tstzrange: "TSTZRANGE",
T__tstzrange: "_TSTZRANGE",
T_daterange: "DATERANGE",
T__daterange: "_DATERANGE",
T_int8range: "INT8RANGE",
T__int8range: "_INT8RANGE",
T_pg_shseclabel: "PG_SHSECLABEL",
T_regnamespace: "REGNAMESPACE",
T__regnamespace: "_REGNAMESPACE",
T_regrole: "REGROLE",
T__regrole: "_REGROLE",
}
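
The TypeName map above simply inverts the OID constants into the upper-case names the driver reports for result columns; array types follow the leading-underscore convention (T__int4, OID 1007, maps to "_INT4"). A minimal lookup sketch, assuming the vendored oid package added in this change set (the `main` package and the chosen OIDs are illustrative only):

```go
package main

import (
    "fmt"

    "github.com/lib/pq/oid"
)

func main() {
    // Resolve a few well-known OIDs to the names used by the driver.
    for _, o := range []oid.Oid{oid.T_int4, oid.T_text, oid.T_timestamptz, oid.T__int4} {
        fmt.Printf("%d -> %s\n", o, oid.TypeName[o]) // e.g. 23 -> INT4
    }
}
```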

93
vendor/github.com/lib/pq/rows.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package pq
import (
"math"
"reflect"
"time"
"github.com/lib/pq/oid"
)
const headerSize = 4
type fieldDesc struct {
// The object ID of the data type.
OID oid.Oid
// The data type size (see pg_type.typlen).
// Note that negative values denote variable-width types.
Len int
// The type modifier (see pg_attribute.atttypmod).
// The meaning of the modifier is type-specific.
Mod int
}
func (fd fieldDesc) Type() reflect.Type {
switch fd.OID {
case oid.T_int8:
return reflect.TypeOf(int64(0))
case oid.T_int4:
return reflect.TypeOf(int32(0))
case oid.T_int2:
return reflect.TypeOf(int16(0))
case oid.T_varchar, oid.T_text:
return reflect.TypeOf("")
case oid.T_bool:
return reflect.TypeOf(false)
case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
return reflect.TypeOf(time.Time{})
case oid.T_bytea:
return reflect.TypeOf([]byte(nil))
default:
return reflect.TypeOf(new(interface{})).Elem()
}
}
func (fd fieldDesc) Name() string {
return oid.TypeName[fd.OID]
}
func (fd fieldDesc) Length() (length int64, ok bool) {
switch fd.OID {
case oid.T_text, oid.T_bytea:
return math.MaxInt64, true
case oid.T_varchar, oid.T_bpchar:
return int64(fd.Mod - headerSize), true
default:
return 0, false
}
}
func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
switch fd.OID {
case oid.T_numeric, oid.T__numeric:
mod := fd.Mod - headerSize
precision = int64((mod >> 16) & 0xffff)
scale = int64(mod & 0xffff)
return precision, scale, true
default:
return 0, 0, false
}
}
// ColumnTypeScanType returns the value type that can be used to scan types into.
func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
return rs.colTyps[index].Type()
}
// ColumnTypeDatabaseTypeName returns the database system type name.
func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
return rs.colTyps[index].Name()
}
// ColumnTypeLength returns the length of the column type if the column is a
// variable length type. If the column is not a variable length type, ok
// should be false.
func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
return rs.colTyps[index].Length()
}
// ColumnTypePrecisionScale should return the precision and scale for decimal
// types. If not applicable, ok should be false.
func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
return rs.colTyps[index].PrecisionScale()
}
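
The fieldDesc methods above back the standard database/sql column-metadata API: Type feeds ColumnTypeScanType, the oid.TypeName lookup feeds ColumnTypeDatabaseTypeName, and PrecisionScale unpacks the numeric type modifier, which packs (precision<<16 | scale) plus the 4-byte header. A sketch of how this surfaces to callers; the DSN is a placeholder, not a value from this change set:

```go
package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq"
)

func main() {
    // Placeholder DSN; any reachable PostgreSQL instance would do.
    db, err := sql.Open("postgres", "host=localhost dbname=appdb sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    rows, err := db.Query(`SELECT 1::int4, 'x'::text, 1.50::numeric(10, 2)`)
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    cols, err := rows.ColumnTypes()
    if err != nil {
        log.Fatal(err)
    }
    for _, ct := range cols {
        // DatabaseTypeName comes from oid.TypeName, ScanType from fieldDesc.Type.
        fmt.Println(ct.DatabaseTypeName(), ct.ScanType())
        if precision, scale, ok := ct.DecimalSize(); ok {
            // For numeric(10,2) the modifier is ((10<<16)|2)+4, so this reports 10 and 2.
            fmt.Println("precision:", precision, "scale:", scale)
        }
    }
}
```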

264
vendor/github.com/lib/pq/scram/scram.go generated vendored Normal file
View File

@ -0,0 +1,264 @@
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
// http://tools.ietf.org/html/rfc5802
//
package scram
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"encoding/base64"
"fmt"
"hash"
"strconv"
"strings"
)
// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
// var in []byte
// var client = scram.NewClient(sha1.New, user, pass)
// for client.Step(in) {
// out := client.Out()
// // send out to server
// in = serverOut
// }
// if client.Err() != nil {
// // auth failed
// }
//
type Client struct {
newHash func() hash.Hash
user string
pass string
step int
out bytes.Buffer
err error
clientNonce []byte
serverNonce []byte
saltedPass []byte
authMsg bytes.Buffer
}
// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-256, for example, use:
//
// client := scram.NewClient(sha256.New, user, pass)
//
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
c := &Client{
newHash: newHash,
user: user,
pass: pass,
}
c.out.Grow(256)
c.authMsg.Grow(256)
return c
}
// Out returns the data to be sent to the server in the current step.
func (c *Client) Out() []byte {
if c.out.Len() == 0 {
return nil
}
return c.out.Bytes()
}
// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
return c.err
}
// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
c.clientNonce = nonce
}
var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
c.out.Reset()
if c.step > 2 || c.err != nil {
return false
}
c.step++
switch c.step {
case 1:
c.err = c.step1(in)
case 2:
c.err = c.step2(in)
case 3:
c.err = c.step3(in)
}
return c.step > 2 || c.err != nil
}
func (c *Client) step1(in []byte) error {
if len(c.clientNonce) == 0 {
const nonceLen = 16
buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
if _, err := rand.Read(buf[:nonceLen]); err != nil {
return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err)
}
c.clientNonce = buf[nonceLen:]
b64.Encode(c.clientNonce, buf[:nonceLen])
}
c.authMsg.WriteString("n=")
escaper.WriteString(&c.authMsg, c.user)
c.authMsg.WriteString(",r=")
c.authMsg.Write(c.clientNonce)
c.out.WriteString("n,,")
c.out.Write(c.authMsg.Bytes())
return nil
}
var b64 = base64.StdEncoding
func (c *Client) step2(in []byte) error {
c.authMsg.WriteByte(',')
c.authMsg.Write(in)
fields := bytes.Split(in, []byte(","))
if len(fields) != 3 {
return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
}
if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
}
if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
}
if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
}
c.serverNonce = fields[0][2:]
if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
}
salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
n, err := b64.Decode(salt, fields[1][2:])
if err != nil {
return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
}
salt = salt[:n]
iterCount, err := strconv.Atoi(string(fields[2][2:]))
if err != nil {
return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
}
c.saltPassword(salt, iterCount)
c.authMsg.WriteString(",c=biws,r=")
c.authMsg.Write(c.serverNonce)
c.out.WriteString("c=biws,r=")
c.out.Write(c.serverNonce)
c.out.WriteString(",p=")
c.out.Write(c.clientProof())
return nil
}
func (c *Client) step3(in []byte) error {
var isv, ise bool
var fields = bytes.Split(in, []byte(","))
if len(fields) == 1 {
isv = bytes.HasPrefix(fields[0], []byte("v="))
ise = bytes.HasPrefix(fields[0], []byte("e="))
}
if ise {
return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:])
} else if !isv {
return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in)
}
if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:])
}
return nil
}
func (c *Client) saltPassword(salt []byte, iterCount int) {
mac := hmac.New(c.newHash, []byte(c.pass))
mac.Write(salt)
mac.Write([]byte{0, 0, 0, 1})
ui := mac.Sum(nil)
hi := make([]byte, len(ui))
copy(hi, ui)
for i := 1; i < iterCount; i++ {
mac.Reset()
mac.Write(ui)
mac.Sum(ui[:0])
for j, b := range ui {
hi[j] ^= b
}
}
c.saltedPass = hi
}
func (c *Client) clientProof() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Client Key"))
clientKey := mac.Sum(nil)
hash := c.newHash()
hash.Write(clientKey)
storedKey := hash.Sum(nil)
mac = hmac.New(c.newHash, storedKey)
mac.Write(c.authMsg.Bytes())
clientProof := mac.Sum(nil)
for i, b := range clientKey {
clientProof[i] ^= b
}
clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
b64.Encode(clientProof64, clientProof)
return clientProof64
}
func (c *Client) serverSignature() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Server Key"))
serverKey := mac.Sum(nil)
mac = hmac.New(c.newHash, serverKey)
mac.Write(c.authMsg.Bytes())
serverSignature := mac.Sum(nil)
encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
b64.Encode(encoded, serverSignature)
return encoded
}
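
The driver normally drives this state machine itself during SCRAM-SHA-256 authentication, but the package stands alone. A minimal sketch of the first step only, with placeholder credentials and no real server round trip:

```go
package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/lib/pq/scram"
)

func main() {
    // Placeholder credentials for illustration.
    client := scram.NewClient(sha256.New, "user", "pass")

    // With no server input yet, Step(nil) runs step1 and produces the
    // client-first message: "n,,n=<user>,r=<base64 nonce>".
    client.Step(nil)
    if client.Err() != nil {
        panic(client.Err())
    }
    fmt.Printf("%s\n", client.Out())
}
```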

204
vendor/github.com/lib/pq/ssl.go generated vendored Normal file
View File

@ -0,0 +1,204 @@
package pq
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
"strings"
)
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place.
func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
verifyCaOnly := false
tlsConf := tls.Config{}
switch mode := o["sslmode"]; mode {
// "require" is the default.
case "", "require":
// We must skip TLS's own verification, because it has performed full
// verification since Go 1.3.
tlsConf.InsecureSkipVerify = true
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
//
// Note: For backwards compatibility with earlier versions of
// PostgreSQL, if a root CA file exists, the behavior of
// sslmode=require will be the same as that of verify-ca, meaning the
// server certificate is validated against the CA. Relying on this
// behavior is discouraged, and applications that need certificate
// validation should always use verify-ca or verify-full.
if sslrootcert, ok := o["sslrootcert"]; ok {
if _, err := os.Stat(sslrootcert); err == nil {
verifyCaOnly = true
} else {
delete(o, "sslrootcert")
}
}
case "verify-ca":
// We must skip TLS's own verification, because it has performed full
// verification since Go 1.3.
tlsConf.InsecureSkipVerify = true
verifyCaOnly = true
case "verify-full":
tlsConf.ServerName = o["host"]
case "disable":
return nil, nil
default:
return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
// Set Server Name Indication (SNI), if enabled by connection parameters.
// By default SNI is on; any value that does not start with "1" disables
// SNI -- the same check vanilla libpq uses.
if sslsni := o["sslsni"]; sslsni == "" || strings.HasPrefix(sslsni, "1") {
// RFC 6066 asks to not set SNI if the host is a literal IP address (IPv4
// or IPv6). This check is already coded in crypto/tls's hostnameInSNI, so
// just always set ServerName here and let crypto/tls do the filtering.
tlsConf.ServerName = o["host"]
}
err := sslClientCertificates(&tlsConf, o)
if err != nil {
return nil, err
}
err = sslCertificateAuthority(&tlsConf, o)
if err != nil {
return nil, err
}
// Accept renegotiation requests initiated by the backend.
//
// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
// the default configuration of older versions has it enabled. Redshift
// also initiates renegotiations and cannot be reconfigured.
tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient
return func(conn net.Conn) (net.Conn, error) {
client := tls.Client(conn, &tlsConf)
if verifyCaOnly {
err := sslVerifyCertificateAuthority(client, &tlsConf)
if err != nil {
return nil, err
}
}
return client, nil
}, nil
}
// sslClientCertificates adds the certificate specified in the "sslcert" and
// "sslkey" settings, or if they aren't set, from the .postgresql directory
// in the user's home directory. The configured files must exist and have
// the correct permissions.
func sslClientCertificates(tlsConf *tls.Config, o values) error {
sslinline := o["sslinline"]
if sslinline == "true" {
cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"]))
if err != nil {
return err
}
tlsConf.Certificates = []tls.Certificate{cert}
return nil
}
// user.Current() might fail when cross-compiling. We have to ignore the
// error and continue without home directory defaults, since we wouldn't
// know from where to load them.
user, _ := user.Current()
// In libpq, the client certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
sslcert := o["sslcert"]
if len(sslcert) == 0 && user != nil {
sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
if len(sslcert) == 0 {
return nil
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
if _, err := os.Stat(sslcert); os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
// In libpq, the ssl key is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
sslkey := o["sslkey"]
if len(sslkey) == 0 && user != nil {
sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
}
if len(sslkey) > 0 {
if err := sslKeyPermissions(sslkey); err != nil {
return err
}
}
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
if err != nil {
return err
}
tlsConf.Certificates = []tls.Certificate{cert}
return nil
}
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
// In libpq, the root certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
tlsConf.RootCAs = x509.NewCertPool()
sslinline := o["sslinline"]
var cert []byte
if sslinline == "true" {
cert = []byte(sslrootcert)
} else {
var err error
cert, err = ioutil.ReadFile(sslrootcert)
if err != nil {
return err
}
}
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
return fmterrorf("couldn't parse pem in sslrootcert")
}
}
return nil
}
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
err := client.Handshake()
if err != nil {
return err
}
certs := client.ConnectionState().PeerCertificates
opts := x509.VerifyOptions{
DNSName: client.ConnectionState().ServerName,
Intermediates: x509.NewCertPool(),
Roots: tlsConf.RootCAs,
}
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
return err
}
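
As the comment in ssl above notes, sslmode=require only falls back to CA verification when a root certificate file happens to be present, so applications that need certificate checks should ask for verify-ca or verify-full explicitly. A sketch of such a connection string; host, user, database and certificate paths are placeholders:

```go
package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq"
)

func main() {
    // verify-full validates the certificate chain and the host name;
    // verify-ca validates only the chain; require encrypts without
    // verification unless a root CA file is present (see ssl above).
    dsn := "host=db.example.com user=app dbname=appdb" +
        " sslmode=verify-full" +
        " sslrootcert=/etc/ssl/pg/root.crt" +
        " sslcert=/etc/ssl/pg/client.crt" +
        " sslkey=/etc/ssl/pg/client.key"

    db, err := sql.Open("postgres", dsn) // no connection is dialed yet
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()
}
```

If sslcert and sslkey are omitted, sslClientCertificates above falls back to ~/.postgresql/postgresql.crt and postgresql.key when those files exist.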

93
vendor/github.com/lib/pq/ssl_permissions.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
//go:build !windows
// +build !windows
package pq
import (
"errors"
"os"
"syscall"
)
const (
rootUserID = uint32(0)
// The maximum permissions that a private key file owned by a regular user
// is allowed to have. This translates to u=rw.
maxUserOwnedKeyPermissions os.FileMode = 0600
// The maximum permissions that a private key file owned by root is allowed
// to have. This translates to u=rw,g=r.
maxRootOwnedKeyPermissions os.FileMode = 0640
)
var (
errSSLKeyHasUnacceptableUserPermissions = errors.New("permissions for files not owned by root should be u=rw (0600) or less")
errSSLKeyHasUnacceptableRootPermissions = errors.New("permissions for root owned files should be u=rw,g=r (0640) or less")
)
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(sslkey string) error {
info, err := os.Stat(sslkey)
if err != nil {
return err
}
err = hasCorrectPermissions(info)
// return ErrSSLKeyHasWorldPermissions for backwards compatibility with
// existing code.
if err == errSSLKeyHasUnacceptableUserPermissions || err == errSSLKeyHasUnacceptableRootPermissions {
err = ErrSSLKeyHasWorldPermissions
}
return err
}
// hasCorrectPermissions checks the file info (and the unix-specific stat_t
// output) to verify that the permissions on the file are correct.
//
// If the file is owned by the same user the process is running as,
// the file should only have 0600 (u=rw). If the file is owned by root,
// and the group matches the group that the process is running in, the
// permissions cannot be more than 0640 (u=rw,g=r). The file should
// never have world permissions.
//
// Returns an error when the permission check fails.
func hasCorrectPermissions(info os.FileInfo) error {
// if the file's permissions are within 0600 (u=rw), allow access.
userPermissionMask := (os.FileMode(0777) ^ maxUserOwnedKeyPermissions)
// regardless of whether we're running as root, 0600 is acceptable,
// so we return if we match the regular user permission mask.
if info.Mode().Perm()&userPermissionMask == 0 {
return nil
}
// We need to pull the Unix file information to get the file's owner.
// If we can't access it, there's some sort of operating system level error
// and we should fail rather than attempting to use faulty information.
sysInfo := info.Sys()
if sysInfo == nil {
return ErrSSLKeyUnknownOwnership
}
unixStat, ok := sysInfo.(*syscall.Stat_t)
if !ok {
return ErrSSLKeyUnknownOwnership
}
// if the file is owned by root, we allow 0640 (u=rw,g=r) to match what
// Postgres does.
if unixStat.Uid == rootUserID {
rootPermissionMask := (os.FileMode(0777) ^ maxRootOwnedKeyPermissions)
if info.Mode().Perm()&rootPermissionMask != 0 {
return errSSLKeyHasUnacceptableRootPermissions
}
return nil
}
return errSSLKeyHasUnacceptableUserPermissions
}
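
A worked example of the two permission masks used above; the modes are illustrative:

```go
package main

import (
    "fmt"
    "os"
)

func main() {
    // Same masks as hasCorrectPermissions: 0777 ^ 0600 = 0177 and 0777 ^ 0640 = 0137.
    userMask := os.FileMode(0777) ^ 0600
    rootMask := os.FileMode(0777) ^ 0640

    for _, perm := range []os.FileMode{0600, 0640, 0644} {
        fmt.Printf("%04o user-owned ok=%v root-owned ok=%v\n",
            perm, perm&userMask == 0, perm&rootMask == 0)
    }
}
```

Under these masks 0600 passes either way, 0640 passes only when the key is owned by root, and 0644 is rejected in both cases because of the world-readable bit.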

10
vendor/github.com/lib/pq/ssl_windows.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
//go:build windows
// +build windows
package pq
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(string) error { return nil }

76
vendor/github.com/lib/pq/url.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package pq
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
)
// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
// connection string to sql.Open() is now supported:
//
// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
//
// It remains exported here for backwards-compatibility.
//
// ParseURL converts a url to a connection string for driver.Open.
// Example:
//
// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
//
// converts to:
//
// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
//
// A minimal example:
//
// "postgres://"
//
// This will be blank, causing driver.Open to use all of the defaults
func ParseURL(url string) (string, error) {
u, err := nurl.Parse(url)
if err != nil {
return "", err
}
if u.Scheme != "postgres" && u.Scheme != "postgresql" {
return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
}
var kvs []string
escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`)
accrue := func(k, v string) {
if v != "" {
kvs = append(kvs, k+"='"+escaper.Replace(v)+"'")
}
}
if u.User != nil {
v := u.User.Username()
accrue("user", v)
v, _ = u.User.Password()
accrue("password", v)
}
if host, port, err := net.SplitHostPort(u.Host); err != nil {
accrue("host", u.Host)
} else {
accrue("host", host)
accrue("port", port)
}
if u.Path != "" {
accrue("dbname", u.Path[1:])
}
q := u.Query()
for k := range q {
accrue(k, q.Get(k))
}
sort.Strings(kvs) // Makes testing easier (not a performance concern)
return strings.Join(kvs, " "), nil
}
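
A quick sketch of ParseURL with the URL from the doc comment above. Note that, per the accrue and sort logic in the function, the generated pairs come back sorted and single-quoted rather than exactly as the comment shows:

```go
package main

import (
    "fmt"
    "log"

    "github.com/lib/pq"
)

func main() {
    connStr, err := pq.ParseURL("postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
    if err != nil {
        log.Fatal(err)
    }
    // Prints: dbname='mydb' host='1.2.3.4' password='secret' port='5432' sslmode='verify-full' user='bob'
    fmt.Println(connStr)
}
```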

10
vendor/github.com/lib/pq/user_other.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
//go:build js || android || hurd || zos
// +build js android hurd zos
package pq
func userCurrent() (string, error) {
return "", ErrCouldNotDetectUsername
}

25
vendor/github.com/lib/pq/user_posix.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos
// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos
package pq
import (
"os"
"os/user"
)
func userCurrent() (string, error) {
u, err := user.Current()
if err == nil {
return u.Username, nil
}
name := os.Getenv("USER")
if name != "" {
return name, nil
}
return "", ErrCouldNotDetectUsername
}

27
vendor/github.com/lib/pq/user_windows.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
package pq
import (
"path/filepath"
"syscall"
)
// Perform Windows user name lookup identically to libpq.
//
// The PostgreSQL code makes use of the legacy Win32 function
// GetUserName, and that function has not been imported into stock Go.
// GetUserNameEx is available though, the difference being that a
// wider range of names are available. To get the output to be the
// same as GetUserName, only the base (or last) component of the
// result is returned.
func userCurrent() (string, error) {
pw_name := make([]uint16, 128)
pwname_size := uint32(len(pw_name)) - 1
err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
if err != nil {
return "", ErrCouldNotDetectUsername
}
s := syscall.UTF16ToString(pw_name)
u := filepath.Base(s)
return u, nil
}

23
vendor/github.com/lib/pq/uuid.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package pq
import (
"encoding/hex"
"fmt"
)
// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
func decodeUUIDBinary(src []byte) ([]byte, error) {
if len(src) != 16 {
return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
}
dst := make([]byte, 36)
dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
hex.Encode(dst[0:], src[0:4])
hex.Encode(dst[9:], src[4:6])
hex.Encode(dst[14:], src[6:8])
hex.Encode(dst[19:], src[8:10])
hex.Encode(dst[24:], src[10:16])
return dst, nil
}
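
decodeUUIDBinary is unexported, so the following is just a standalone sketch of the same 8-4-4-4-12 text layout it produces; the sample bytes are arbitrary:

```go
package main

import (
    "encoding/hex"
    "fmt"
)

func main() {
    src := []byte{
        0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
        0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
    }
    dst := make([]byte, 36)
    dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
    hex.Encode(dst[0:], src[0:4])    // 8 hex digits
    hex.Encode(dst[9:], src[4:6])    // 4
    hex.Encode(dst[14:], src[6:8])   // 4
    hex.Encode(dst[19:], src[8:10])  // 4
    hex.Encode(dst[24:], src[10:16]) // 12
    fmt.Println(string(dst)) // 12345678-9abc-def0-0123-456789abcdef
}
```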

14
vendor/modules.txt vendored Normal file
View File

@ -0,0 +1,14 @@
# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/gorilla/handlers v1.5.2
## explicit; go 1.20
github.com/gorilla/handlers
# github.com/gorilla/mux v1.8.1
## explicit; go 1.20
github.com/gorilla/mux
# github.com/lib/pq v1.10.9
## explicit; go 1.13
github.com/lib/pq
github.com/lib/pq/oid
github.com/lib/pq/scram