terraform test: Experimental JUnit XML reporting

This adds an experimental new option -junit-xml=FILENAME for the
"terraform test" command. When specified, it writes a JUnit XML report to
the specified filename once the test run is complete, while continuing to
report test progress in the UI in the usual way.

This is only experimental for now because it remains to be seen whether this
particular mapping to the JUnit XML schema is actually useful in real
software -- this format is woefully underdocumented and implemented
slightly differently by each consumer -- and so we might change this
significantly before stabilizing it, or remove it altogether if it turns
out that there's no useful mapping to JUnit XML here. Hopefully those who
are interested in JUnit XML reports will try this experiment against their
favorite JUnit XML-consuming software and report back whether the report
is presented in a helpful way.

It's a de-facto convention for JUnit XML to be reported separately to a
file, rather than replacing the normal test run output, since tools that
consume this format tend to present its results in a separate and less
prominent place than the output of the command itself. This option is
designed to follow that convention for consistency with various other
software that produces this format.

The implementation here is intentionally pretty minimal and simplistic just
as a starting point for gathering feedback. The main priority is that it
be easy to evolve this based on feedback and to remove it altogether if we
decide not to stabilize this at all. If this does become stabilized, it
might deserve being factored out into a separate package so that we can
minimize the amount of logic embedded directly inside the views package,
and it will certainly need some unit tests to represent what we've
committed to supporting in future versions.
apparentlymart-patch-3
Martin Atkins 2 years ago
parent 1298fcd412
commit b43ddd8ff6

@ -27,6 +27,11 @@ type Test struct {
// ViewType specifies which output format to use: human or JSON.
ViewType ViewType
// JUnitXMLFile specifies an optional filename to write a JUnit XML test
// result report to, in addition to the information written to the selected
// view type.
JUnitXMLFile string
// You can specify common variables for all tests from the command line.
Vars *Vars
@ -48,6 +53,7 @@ func ParseTest(args []string) (*Test, tfdiags.Diagnostics) {
cmdFlags.Var((*flagStringSlice)(&test.Filter), "filter", "filter")
cmdFlags.StringVar(&test.TestDirectory, "test-directory", configs.DefaultTestDirectory, "test-directory")
cmdFlags.BoolVar(&jsonOutput, "json", false, "json")
cmdFlags.StringVar(&test.JUnitXMLFile, "junit-xml", "", "junit-xml")
cmdFlags.BoolVar(&test.Verbose, "verbose", false, "verbose")
// TODO: Finalise the name of this flag.

@ -5,6 +5,7 @@ package command
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
@ -99,6 +100,31 @@ func (c *TestCommand) Run(rawArgs []string) int {
}
view := views.NewTest(args.ViewType, c.View)
var junitXMLView *views.TestJUnitXMLFile
if args.JUnitXMLFile != "" {
// JUnit XML output is currently experimental, so that we can gather
// feedback on exactly how we should map the test results to this
// JUnit-oriented format before anyone starts depending on it for real.
if !c.AllowExperimentalFeatures {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"JUnit XML output is not available",
"The -junit-xml option is currently experimental and therefore available only in alpha releases of Terraform CLI.",
))
view.Diagnostics(nil, nil, diags)
return 1
}
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Warning,
"JUnit XML output is experimental",
"The -junit-xml option is currently experimental and therefore subject to breaking changes or removal, even in patch releases.",
))
junitXMLView = views.NewTestJUnitXMLFile(args.JUnitXMLFile)
view = views.TestMulti{
view,
junitXMLView,
}
}
// The specified testing directory must be a relative path, and it must
// point to a directory that is a descendent of the configuration directory.
@ -278,6 +304,16 @@ func (c *TestCommand) Run(rawArgs []string) int {
// tests finished normally with no interrupts.
}
if junitXMLView != nil {
if err := junitXMLView.Err(); err != nil {
testDiags = testDiags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to write JUnit XML report",
fmt.Sprintf("Could not write the requested JUnit XML report: %s.", err),
))
}
}
view.Diagnostics(nil, nil, testDiags)
if status != moduletest.Pass {

@ -5,8 +5,12 @@ package views
import (
"bytes"
"encoding/xml"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-tfe"
@ -732,6 +736,276 @@ func (t *TestJSON) TFCRetryHook(attemptNum int, resp *http.Response) {
)
}
// TestJUnitXMLFile produces a JUnit XML file at the conclusion of a test
// run, summarizing the outcome of the test in a form that can then be
// interpreted by tools which render JUnit XML result reports.
//
// The de-facto convention for JUnit XML is for it to be emitted as a separate
// file as a complement to human-oriented output, rather than _instead of_
// human-oriented output, and so this view meets that expectation by creating
// a new file only once the test run has completed, at the "Conclusion" event.
// If that event isn't reached for any reason then no file will be created at
// all, which JUnit XML-consuming tools tend to expect as an outcome of a
// catastrophically-errored test suite.
//
// Views cannot return errors directly from their events, so if this view fails
// to create or write to the designated file when asked to report the conclusion
// it will save the error as part of its state, accessible from method
// [TestJUnitXMLFile.Err].
//
// This view is intended only for use in conjunction with another view that
// provides the streaming output of ongoing testing events, so it should
// typically be wrapped in a [TestMulti] along with either [TestHuman] or
// [TestJSON].
type TestJUnitXMLFile struct {
	// filename is the path of the file to create or overwrite when the
	// Conclusion event arrives.
	filename string

	// err retains any error encountered while generating or writing the
	// report during Conclusion, for later retrieval via the Err method.
	err error
}
var _ Test = (*TestJUnitXMLFile)(nil)
// NewTestJUnitXMLFile returns a [Test] implementation that will, when asked
// to report "conclusion", write a JUnit XML report to the given filename.
//
// If the file already exists then this view will silently overwrite it at the
// point of being asked to write a conclusion. Otherwise it will create the
// file at that time. If creating or overwriting the file fails, a subsequent
// call to method Err will return information about the problem.
func NewTestJUnitXMLFile(filename string) *TestJUnitXMLFile {
	return &TestJUnitXMLFile{
		filename: filename,
	}
}
// Err returns an error that the receiver previously encountered when trying
// to handle the Conclusion event by creating and writing into a file.
//
// Returns nil if either there was no error or if this object hasn't yet been
// asked to report a conclusion.
func (v *TestJUnitXMLFile) Err() error {
	return v.err
}
func (v *TestJUnitXMLFile) Abstract(suite *moduletest.Suite) {}
// Conclusion generates a JUnit XML report for the now-complete test suite
// and writes it to the configured filename, creating or overwriting the
// file as needed.
//
// View methods cannot return errors directly, so any problem producing or
// writing the report is retained for later retrieval via the Err method.
func (v *TestJUnitXMLFile) Conclusion(suite *moduletest.Suite) {
	xmlSrc, err := junitXMLTestReport(suite)
	if err == nil {
		err = os.WriteFile(v.filename, xmlSrc, 0660)
	}
	if err != nil {
		v.err = err
	}
}
// The following events describe incremental test progress that this view
// doesn't need to track: the JUnit XML report is built only from the final
// suite state given to Conclusion, so these are all intentional no-ops.

func (v *TestJUnitXMLFile) File(file *moduletest.File, progress moduletest.Progress) {}

func (v *TestJUnitXMLFile) Run(run *moduletest.Run, file *moduletest.File, progress moduletest.Progress, elapsed int64) {
}

func (v *TestJUnitXMLFile) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
}

func (v *TestJUnitXMLFile) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
}

func (v *TestJUnitXMLFile) Interrupted() {}

func (v *TestJUnitXMLFile) FatalInterrupt() {}

func (v *TestJUnitXMLFile) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, states map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) {
}

func (v *TestJUnitXMLFile) TFCStatusUpdate(status tfe.TestRunStatus, elapsed time.Duration) {}

func (v *TestJUnitXMLFile) TFCRetryHook(attemptNum int, resp *http.Response) {}
// junitXMLTestReport produces a JUnit XML document, as a UTF-8 byte slice,
// summarizing the outcome of the given test suite.
//
// The mapping to JUnit XML concepts is: each test file becomes a "testsuite"
// element and each run within a file becomes a "testcase" element. A run
// with status Fail is reported with a "failure" element and one with status
// Error with an "error" element carrying the run's rendered diagnostics;
// diagnostics attached to a run that did not error (e.g. warnings) are
// reported under "system-err" so that they appear somewhere in the report.
//
// Returns an error if the XML encoder reports a problem while building the
// document.
func junitXMLTestReport(suite *moduletest.Suite) ([]byte, error) {
	// withMessage and testCase model the subset of the JUnit XML schema we
	// generate per run, using encoding/xml struct tags. Declared once here
	// rather than inside the loop below.
	type withMessage struct {
		Message string `xml:"message,attr,omitempty"`
		Body    string `xml:",cdata"`
	}
	type testCase struct {
		Name    string       `xml:"name,attr"`
		Failure *withMessage `xml:"failure,omitempty"`
		Error   *withMessage `xml:"error,omitempty"`
		Stderr  *withMessage `xml:"system-err,omitempty"`
	}

	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)
	// The XML declaration must be the very first token in the stream.
	// (FIX: the encoding pseudo-attribute was previously missing its closing
	// quote, which produced an invalid XML prolog.)
	err := enc.EncodeToken(xml.ProcInst{
		Target: "xml",
		Inst:   []byte(`version="1.0" encoding="UTF-8"`),
	})
	if err != nil {
		return nil, err
	}
	enc.Indent("", "  ")

	// Some common element/attribute names we'll use repeatedly below.
	suitesName := xml.Name{Local: "testsuites"}
	suiteName := xml.Name{Local: "testsuite"}
	caseName := xml.Name{Local: "testcase"}
	nameName := xml.Name{Local: "name"}
	testsName := xml.Name{Local: "tests"}
	failuresName := xml.Name{Local: "failures"}
	errorsName := xml.Name{Local: "errors"}

	enc.EncodeToken(xml.StartElement{Name: suitesName})
	for _, file := range suite.Files {
		// Each test file is modelled as a "test suite".

		// First we'll count the number of tests and number of
		// failures/errors for the suite-level summary.
		totalTests := len(file.Runs)
		totalFails := 0
		totalErrs := 0
		for _, run := range file.Runs {
			switch run.Status {
			case moduletest.Fail:
				totalFails++
			case moduletest.Error:
				totalErrs++
			}
		}
		enc.EncodeToken(xml.StartElement{
			Name: suiteName,
			Attr: []xml.Attr{
				{Name: nameName, Value: file.Name},
				{Name: testsName, Value: strconv.Itoa(totalTests)},
				{Name: failuresName, Value: strconv.Itoa(totalFails)},
				{Name: errorsName, Value: strconv.Itoa(totalErrs)},
			},
		})

		for _, run := range file.Runs {
			// Each run is a "test case".
			tc := testCase{
				Name: run.Name,
			}
			switch run.Status {
			case moduletest.Fail:
				tc.Failure = &withMessage{
					Message: "Test run failed",
					// FIXME: What's a useful thing to report in the body
					// here? A summary of the statuses from all of the
					// checkable objects in the configuration?
				}
			case moduletest.Error:
				var diagsStr strings.Builder
				for _, diag := range run.Diagnostics {
					// FIXME: Pass in the sources so that these diagnostics
					// can include source snippets when appropriate.
					diagsStr.WriteString(format.DiagnosticPlain(diag, nil, 80))
				}
				tc.Error = &withMessage{
					Message: "Encountered an error",
					Body:    diagsStr.String(),
				}
			}
			if len(run.Diagnostics) != 0 && tc.Error == nil {
				// If we have diagnostics but the outcome wasn't an error
				// then we're presumably holding diagnostics that didn't
				// cause the test to error, such as warnings. We'll place
				// those into the "system-err" element instead, so that
				// they'll be reported _somewhere_ at least.
				var diagsStr strings.Builder
				for _, diag := range run.Diagnostics {
					// FIXME: Pass in the sources so that these diagnostics
					// can include source snippets when appropriate.
					diagsStr.WriteString(format.DiagnosticPlain(diag, nil, 80))
				}
				tc.Stderr = &withMessage{
					Body: diagsStr.String(),
				}
			}
			// EncodeElement can fail (e.g. on an invalid element name), so
			// don't silently discard its error as before.
			if err := enc.EncodeElement(&tc, xml.StartElement{Name: caseName}); err != nil {
				return nil, err
			}
		}
		enc.EncodeToken(xml.EndElement{Name: suiteName})
	}
	enc.EncodeToken(xml.EndElement{Name: suitesName})
	// Close flushes buffered output and reports any error the encoder
	// accumulated along the way; this result was previously ignored even
	// though the function signature promises to return errors.
	if err := enc.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// TestMulti is a fan-out adapter that forwards every event to each of the
// wrapped test views, for situations where multiple outputs are needed at
// the same time.
type TestMulti []Test

var _ Test = TestMulti(nil)

// Abstract forwards the event to every wrapped view.
func (m TestMulti) Abstract(suite *moduletest.Suite) {
	for _, view := range m {
		view.Abstract(suite)
	}
}

// Conclusion forwards the event to every wrapped view.
func (m TestMulti) Conclusion(suite *moduletest.Suite) {
	for _, view := range m {
		view.Conclusion(suite)
	}
}

// File forwards the event to every wrapped view.
func (m TestMulti) File(file *moduletest.File, progress moduletest.Progress) {
	for _, view := range m {
		view.File(file, progress)
	}
}

// Run forwards the event to every wrapped view.
func (m TestMulti) Run(run *moduletest.Run, file *moduletest.File, progress moduletest.Progress, elapsed int64) {
	for _, view := range m {
		view.Run(run, file, progress, elapsed)
	}
}

// DestroySummary forwards the event to every wrapped view.
func (m TestMulti) DestroySummary(diags tfdiags.Diagnostics, run *moduletest.Run, file *moduletest.File, state *states.State) {
	for _, view := range m {
		view.DestroySummary(diags, run, file, state)
	}
}

// Diagnostics forwards the event to every wrapped view.
func (m TestMulti) Diagnostics(run *moduletest.Run, file *moduletest.File, diags tfdiags.Diagnostics) {
	for _, view := range m {
		view.Diagnostics(run, file, diags)
	}
}

// Interrupted forwards the event to every wrapped view.
func (m TestMulti) Interrupted() {
	for _, view := range m {
		view.Interrupted()
	}
}

// FatalInterrupt forwards the event to every wrapped view.
func (m TestMulti) FatalInterrupt() {
	for _, view := range m {
		view.FatalInterrupt()
	}
}

// FatalInterruptSummary forwards the event to every wrapped view.
func (m TestMulti) FatalInterruptSummary(run *moduletest.Run, file *moduletest.File, states map[*moduletest.Run]*states.State, created []*plans.ResourceInstanceChangeSrc) {
	for _, view := range m {
		view.FatalInterruptSummary(run, file, states, created)
	}
}

// TFCStatusUpdate forwards the event to every wrapped view.
func (m TestMulti) TFCStatusUpdate(status tfe.TestRunStatus, elapsed time.Duration) {
	for _, view := range m {
		view.TFCStatusUpdate(status, elapsed)
	}
}

// TFCRetryHook forwards the event to every wrapped view.
func (m TestMulti) TFCRetryHook(attemptNum int, resp *http.Response) {
	for _, view := range m {
		view.TFCRetryHook(attemptNum, resp)
	}
}
func colorizeTestStatus(status moduletest.Status, color *colorstring.Colorize) string {
switch status {
case moduletest.Error, moduletest.Fail:

Loading…
Cancel
Save