Update yaml parser
Mark the top level Loglevel field as deprecated Signed-off-by: Derek McGowan <derek@mcgstyle.net>master
							parent
							
								
									94deea2951
								
							
						
					
					
						commit
						f0ee5720a5
					
				|  | @ -30,7 +30,7 @@ type Configuration struct { | |||
| 		} `yaml:"accesslog,omitempty"` | ||||
| 
 | ||||
| 		// Level is the granularity at which registry operations are logged.
 | ||||
| 		Level Loglevel `yaml:"level"` | ||||
| 		Level Loglevel `yaml:"level,omitempty"` | ||||
| 
 | ||||
| 		// Formatter overrides the default formatter with another. Options
 | ||||
| 		// include "text", "json" and "logstash".
 | ||||
|  | @ -45,8 +45,9 @@ type Configuration struct { | |||
| 		Hooks []LogHook `yaml:"hooks,omitempty"` | ||||
| 	} | ||||
| 
 | ||||
| 	// Loglevel is the level at which registry operations are logged. This is
 | ||||
| 	// deprecated. Please use Log.Level in the future.
 | ||||
| 	// Loglevel is the level at which registry operations are logged.
 | ||||
| 	//
 | ||||
| 	// Deprecated: Use Log.Level instead.
 | ||||
| 	Loglevel Loglevel `yaml:"loglevel,omitempty"` | ||||
| 
 | ||||
| 	// Storage is the configuration for the registry's storage driver
 | ||||
|  | @ -640,8 +641,15 @@ func Parse(rd io.Reader) (*Configuration, error) { | |||
| 			ParseAs: reflect.TypeOf(v0_1Configuration{}), | ||||
| 			ConversionFunc: func(c interface{}) (interface{}, error) { | ||||
| 				if v0_1, ok := c.(*v0_1Configuration); ok { | ||||
| 					if v0_1.Loglevel == Loglevel("") { | ||||
| 						v0_1.Loglevel = Loglevel("info") | ||||
| 					if v0_1.Log.Level == Loglevel("") { | ||||
| 						if v0_1.Loglevel != Loglevel("") { | ||||
| 							v0_1.Log.Level = v0_1.Loglevel | ||||
| 						} else { | ||||
| 							v0_1.Log.Level = Loglevel("info") | ||||
| 						} | ||||
| 					} | ||||
| 					if v0_1.Loglevel != Loglevel("") { | ||||
| 						v0_1.Loglevel = Loglevel("") | ||||
| 					} | ||||
| 					if v0_1.Storage.Type() == "" { | ||||
| 						return nil, errors.New("No storage configuration provided") | ||||
|  |  | |||
|  | @ -22,14 +22,14 @@ var configStruct = Configuration{ | |||
| 		AccessLog struct { | ||||
| 			Disabled bool `yaml:"disabled,omitempty"` | ||||
| 		} `yaml:"accesslog,omitempty"` | ||||
| 		Level     Loglevel               `yaml:"level"` | ||||
| 		Level     Loglevel               `yaml:"level,omitempty"` | ||||
| 		Formatter string                 `yaml:"formatter,omitempty"` | ||||
| 		Fields    map[string]interface{} `yaml:"fields,omitempty"` | ||||
| 		Hooks     []LogHook              `yaml:"hooks,omitempty"` | ||||
| 	}{ | ||||
| 		Level:  "info", | ||||
| 		Fields: map[string]interface{}{"environment": "test"}, | ||||
| 	}, | ||||
| 	Loglevel: "info", | ||||
| 	Storage: Storage{ | ||||
| 		"s3": Parameters{ | ||||
| 			"region":        "us-east-1", | ||||
|  | @ -126,9 +126,9 @@ var configStruct = Configuration{ | |||
| var configYamlV0_1 = ` | ||||
| version: 0.1 | ||||
| log: | ||||
|   level: info | ||||
|   fields: | ||||
|     environment: test | ||||
| loglevel: info | ||||
| storage: | ||||
|   s3: | ||||
|     region: us-east-1 | ||||
|  | @ -171,7 +171,8 @@ http: | |||
| // storage driver with no parameters
 | ||||
| var inmemoryConfigYamlV0_1 = ` | ||||
| version: 0.1 | ||||
| loglevel: info | ||||
| log: | ||||
|   level: info | ||||
| storage: inmemory | ||||
| auth: | ||||
|   silly: | ||||
|  | @ -212,6 +213,7 @@ func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { | |||
| 	configBytes, err := yaml.Marshal(suite.expectedConfig) | ||||
| 	c.Assert(err, IsNil) | ||||
| 	config, err := Parse(bytes.NewReader(configBytes)) | ||||
| 	c.Log(string(configBytes)) | ||||
| 	c.Assert(err, IsNil) | ||||
| 	c.Assert(config, DeepEquals, suite.expectedConfig) | ||||
| } | ||||
|  | @ -334,9 +336,9 @@ func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { | |||
| // TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
 | ||||
| // log level will override the value provided in the yaml document
 | ||||
| func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { | ||||
| 	suite.expectedConfig.Loglevel = "error" | ||||
| 	suite.expectedConfig.Log.Level = "error" | ||||
| 
 | ||||
| 	os.Setenv("REGISTRY_LOGLEVEL", "error") | ||||
| 	os.Setenv("REGISTRY_LOG_LEVEL", "error") | ||||
| 
 | ||||
| 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) | ||||
| 	c.Assert(err, IsNil) | ||||
|  |  | |||
|  | @ -237,13 +237,6 @@ func configureReporting(app *handlers.App) http.Handler { | |||
| // configureLogging prepares the context with a logger using the
 | ||||
| // configuration.
 | ||||
| func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { | ||||
| 	if config.Log.Level == "" && config.Log.Formatter == "" { | ||||
| 		// If no config for logging is set, fallback to deprecated "Loglevel".
 | ||||
| 		log.SetLevel(logLevel(config.Loglevel)) | ||||
| 		ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx)) | ||||
| 		return ctx, nil | ||||
| 	} | ||||
| 
 | ||||
| 	log.SetLevel(logLevel(config.Log.Level)) | ||||
| 
 | ||||
| 	formatter := config.Log.Formatter | ||||
|  |  | |||
|  | @ -45,7 +45,7 @@ google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 | |||
| google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 | ||||
| gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 | ||||
| gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b | ||||
| gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 | ||||
| gopkg.in/yaml.v2 v2.2.1 | ||||
| rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git | ||||
| github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb | ||||
| github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 | ||||
|  |  | |||
|  | @ -1,188 +1,201 @@ | |||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
| 
 | ||||
| Copyright (c) 2011-2014 - Canonical Inc. | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
| 
 | ||||
| This software is licensed under the LGPLv3, included below. | ||||
|    1. Definitions. | ||||
| 
 | ||||
| As a special exception to the GNU Lesser General Public License version 3 | ||||
| ("LGPL3"), the copyright holders of this Library give you permission to | ||||
| convey to a third party a Combined Work that links statically or dynamically | ||||
| to this Library without providing any Minimal Corresponding Source or | ||||
| Minimal Application Code as set out in 4d or providing the installation | ||||
| information set out in section 4e, provided that you comply with the other | ||||
| provisions of LGPL3 and provided that you meet, for the Application the | ||||
| terms and conditions of the license(s) which apply to the Application. | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
| 
 | ||||
| Except as stated in this special exception, the provisions of LGPL3 will | ||||
| continue to comply in full to this Library. If you modify this Library, you | ||||
| may apply this exception to your version of this Library, but you are not | ||||
| obliged to do so. If you do not wish to do so, delete this exception | ||||
| statement from your version. This exception does not (and cannot) modify any | ||||
| license terms which apply to the Application, with which you must still | ||||
| comply. | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
| 
 | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
| 
 | ||||
|                    GNU LESSER GENERAL PUBLIC LICENSE | ||||
|                        Version 3, 29 June 2007 | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
| 
 | ||||
|  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | ||||
|  Everyone is permitted to copy and distribute verbatim copies | ||||
|  of this license document, but changing it is not allowed. | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
| 
 | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
| 
 | ||||
|   This version of the GNU Lesser General Public License incorporates | ||||
| the terms and conditions of version 3 of the GNU General Public | ||||
| License, supplemented by the additional permissions listed below. | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
| 
 | ||||
|   0. Additional Definitions. | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
| 
 | ||||
|   As used herein, "this License" refers to version 3 of the GNU Lesser | ||||
| General Public License, and the "GNU GPL" refers to version 3 of the GNU | ||||
| General Public License. | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
| 
 | ||||
|   "The Library" refers to a covered work governed by this License, | ||||
| other than an Application or a Combined Work as defined below. | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
| 
 | ||||
|   An "Application" is any work that makes use of an interface provided | ||||
| by the Library, but which is not otherwise based on the Library. | ||||
| Defining a subclass of a class defined by the Library is deemed a mode | ||||
| of using an interface provided by the Library. | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
| 
 | ||||
|   A "Combined Work" is a work produced by combining or linking an | ||||
| Application with the Library.  The particular version of the Library | ||||
| with which the Combined Work was made is also called the "Linked | ||||
| Version". | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
| 
 | ||||
|   The "Minimal Corresponding Source" for a Combined Work means the | ||||
| Corresponding Source for the Combined Work, excluding any source code | ||||
| for portions of the Combined Work that, considered in isolation, are | ||||
| based on the Application, and not on the Linked Version. | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
| 
 | ||||
|   The "Corresponding Application Code" for a Combined Work means the | ||||
| object code and/or source code for the Application, including any data | ||||
| and utility programs needed for reproducing the Combined Work from the | ||||
| Application, but excluding the System Libraries of the Combined Work. | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
| 
 | ||||
|   1. Exception to Section 3 of the GNU GPL. | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
| 
 | ||||
|   You may convey a covered work under sections 3 and 4 of this License | ||||
| without being bound by section 3 of the GNU GPL. | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
| 
 | ||||
|   2. Conveying Modified Versions. | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
| 
 | ||||
|   If you modify a copy of the Library, and, in your modifications, a | ||||
| facility refers to a function or data to be supplied by an Application | ||||
| that uses the facility (other than as an argument passed when the | ||||
| facility is invoked), then you may convey a copy of the modified | ||||
| version: | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
| 
 | ||||
|    a) under this License, provided that you make a good faith effort to | ||||
|    ensure that, in the event an Application does not supply the | ||||
|    function or data, the facility still operates, and performs | ||||
|    whatever part of its purpose remains meaningful, or | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
| 
 | ||||
|    b) under the GNU GPL, with none of the additional permissions of | ||||
|    this License applicable to that copy. | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
| 
 | ||||
|   3. Object Code Incorporating Material from Library Header Files. | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
| 
 | ||||
|   The object code form of an Application may incorporate material from | ||||
| a header file that is part of the Library.  You may convey such object | ||||
| code under terms of your choice, provided that, if the incorporated | ||||
| material is not limited to numerical parameters, data structure | ||||
| layouts and accessors, or small macros, inline functions and templates | ||||
| (ten or fewer lines in length), you do both of the following: | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
| 
 | ||||
|    a) Give prominent notice with each copy of the object code that the | ||||
|    Library is used in it and that the Library and its use are | ||||
|    covered by this License. | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
| 
 | ||||
|    b) Accompany the object code with a copy of the GNU GPL and this license | ||||
|    document. | ||||
|    END OF TERMS AND CONDITIONS | ||||
| 
 | ||||
|   4. Combined Works. | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
| 
 | ||||
|   You may convey a Combined Work under terms of your choice that, | ||||
| taken together, effectively do not restrict modification of the | ||||
| portions of the Library contained in the Combined Work and reverse | ||||
| engineering for debugging such modifications, if you also do each of | ||||
| the following: | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
| 
 | ||||
|    a) Give prominent notice with each copy of the Combined Work that | ||||
|    the Library is used in it and that the Library and its use are | ||||
|    covered by this License. | ||||
|    Copyright {yyyy} {name of copyright owner} | ||||
| 
 | ||||
|    b) Accompany the Combined Work with a copy of the GNU GPL and this license | ||||
|    document. | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|    c) For a Combined Work that displays copyright notices during | ||||
|    execution, include the copyright notice for the Library among | ||||
|    these notices, as well as a reference directing the user to the | ||||
|    copies of the GNU GPL and this license document. | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    d) Do one of the following: | ||||
| 
 | ||||
|        0) Convey the Minimal Corresponding Source under the terms of this | ||||
|        License, and the Corresponding Application Code in a form | ||||
|        suitable for, and under terms that permit, the user to | ||||
|        recombine or relink the Application with a modified version of | ||||
|        the Linked Version to produce a modified Combined Work, in the | ||||
|        manner specified by section 6 of the GNU GPL for conveying | ||||
|        Corresponding Source. | ||||
| 
 | ||||
|        1) Use a suitable shared library mechanism for linking with the | ||||
|        Library.  A suitable mechanism is one that (a) uses at run time | ||||
|        a copy of the Library already present on the user's computer | ||||
|        system, and (b) will operate properly with a modified version | ||||
|        of the Library that is interface-compatible with the Linked | ||||
|        Version. | ||||
| 
 | ||||
|    e) Provide Installation Information, but only if you would otherwise | ||||
|    be required to provide such information under section 6 of the | ||||
|    GNU GPL, and only to the extent that such information is | ||||
|    necessary to install and execute a modified version of the | ||||
|    Combined Work produced by recombining or relinking the | ||||
|    Application with a modified version of the Linked Version. (If | ||||
|    you use option 4d0, the Installation Information must accompany | ||||
|    the Minimal Corresponding Source and Corresponding Application | ||||
|    Code. If you use option 4d1, you must provide the Installation | ||||
|    Information in the manner specified by section 6 of the GNU GPL | ||||
|    for conveying Corresponding Source.) | ||||
| 
 | ||||
|   5. Combined Libraries. | ||||
| 
 | ||||
|   You may place library facilities that are a work based on the | ||||
| Library side by side in a single library together with other library | ||||
| facilities that are not Applications and are not covered by this | ||||
| License, and convey such a combined library under terms of your | ||||
| choice, if you do both of the following: | ||||
| 
 | ||||
|    a) Accompany the combined library with a copy of the same work based | ||||
|    on the Library, uncombined with any other library facilities, | ||||
|    conveyed under the terms of this License. | ||||
| 
 | ||||
|    b) Give prominent notice with the combined library that part of it | ||||
|    is a work based on the Library, and explaining where to find the | ||||
|    accompanying uncombined form of the same work. | ||||
| 
 | ||||
|   6. Revised Versions of the GNU Lesser General Public License. | ||||
| 
 | ||||
|   The Free Software Foundation may publish revised and/or new versions | ||||
| of the GNU Lesser General Public License from time to time. Such new | ||||
| versions will be similar in spirit to the present version, but may | ||||
| differ in detail to address new problems or concerns. | ||||
| 
 | ||||
|   Each version is given a distinguishing version number. If the | ||||
| Library as you received it specifies that a certain numbered version | ||||
| of the GNU Lesser General Public License "or any later version" | ||||
| applies to it, you have the option of following the terms and | ||||
| conditions either of that published version or of any later version | ||||
| published by the Free Software Foundation. If the Library as you | ||||
| received it does not specify a version number of the GNU Lesser | ||||
| General Public License, you may choose any version of the GNU Lesser | ||||
| General Public License ever published by the Free Software Foundation. | ||||
| 
 | ||||
|   If the Library as you received it specifies that a proxy can decide | ||||
| whether future versions of the GNU Lesser General Public License shall | ||||
| apply, that proxy's public statement of acceptance of any version is | ||||
| permanent authorization for you to choose that version for the | ||||
| Library. | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
|  |  | |||
|  | @ -0,0 +1,13 @@ | |||
| Copyright 2011-2016 Canonical Ltd. | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
|  | @ -42,7 +42,7 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https: | |||
| License | ||||
| ------- | ||||
| 
 | ||||
| The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. | ||||
| The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. | ||||
| 
 | ||||
| 
 | ||||
| Example | ||||
|  | @ -65,9 +65,14 @@ b: | |||
|   d: [3, 4] | ||||
| ` | ||||
| 
 | ||||
| // Note: struct fields must be public in order for unmarshal to | ||||
| // correctly populate the data. | ||||
| type T struct { | ||||
|         A string | ||||
|         B struct{C int; D []int ",flow"} | ||||
|         B struct { | ||||
|                 RenamedC int   `yaml:"c"` | ||||
|                 D        []int `yaml:",flow"` | ||||
|         } | ||||
| } | ||||
| 
 | ||||
| func main() { | ||||
|  |  | |||
|  | @ -2,7 +2,6 @@ package yaml | |||
| 
 | ||||
| import ( | ||||
| 	"io" | ||||
| 	"os" | ||||
| ) | ||||
| 
 | ||||
| func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { | ||||
|  | @ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err | |||
| 	return n, nil | ||||
| } | ||||
| 
 | ||||
| // File read handler.
 | ||||
| func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { | ||||
| 	return parser.input_file.Read(buffer) | ||||
| // Reader read handler.
 | ||||
| func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { | ||||
| 	return parser.input_reader.Read(buffer) | ||||
| } | ||||
| 
 | ||||
| // Set a string input.
 | ||||
|  | @ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { | |||
| } | ||||
| 
 | ||||
| // Set a file input.
 | ||||
| func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { | ||||
| func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { | ||||
| 	if parser.read_handler != nil { | ||||
| 		panic("must set the input source only once") | ||||
| 	} | ||||
| 	parser.read_handler = yaml_file_read_handler | ||||
| 	parser.input_file = file | ||||
| 	parser.read_handler = yaml_reader_read_handler | ||||
| 	parser.input_reader = r | ||||
| } | ||||
| 
 | ||||
| // Set the source encoding.
 | ||||
|  | @ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { | |||
| } | ||||
| 
 | ||||
| // Create a new emitter object.
 | ||||
| func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { | ||||
| func yaml_emitter_initialize(emitter *yaml_emitter_t) { | ||||
| 	*emitter = yaml_emitter_t{ | ||||
| 		buffer:     make([]byte, output_buffer_size), | ||||
| 		raw_buffer: make([]byte, 0, output_raw_buffer_size), | ||||
| 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size), | ||||
| 		events:     make([]yaml_event_t, 0, initial_queue_size), | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Destroy an emitter object.
 | ||||
|  | @ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { | |||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // File write handler.
 | ||||
| func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { | ||||
| 	_, err := emitter.output_file.Write(buffer) | ||||
| // yaml_writer_write_handler uses emitter.output_writer to write the
 | ||||
| // emitted text.
 | ||||
| func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { | ||||
| 	_, err := emitter.output_writer.Write(buffer) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
|  | @ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by | |||
| } | ||||
| 
 | ||||
| // Set a file output.
 | ||||
| func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { | ||||
| func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { | ||||
| 	if emitter.write_handler != nil { | ||||
| 		panic("must set the output target only once") | ||||
| 	} | ||||
| 	emitter.write_handler = yaml_file_write_handler | ||||
| 	emitter.output_file = file | ||||
| 	emitter.write_handler = yaml_writer_write_handler | ||||
| 	emitter.output_writer = w | ||||
| } | ||||
| 
 | ||||
| // Set the output encoding.
 | ||||
|  | @ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { | |||
| //
 | ||||
| 
 | ||||
| // Create STREAM-START.
 | ||||
| func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { | ||||
| func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ:      yaml_STREAM_START_EVENT, | ||||
| 		encoding: encoding, | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Create STREAM-END.
 | ||||
| func yaml_stream_end_event_initialize(event *yaml_event_t) bool { | ||||
| func yaml_stream_end_event_initialize(event *yaml_event_t) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ: yaml_STREAM_END_EVENT, | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Create DOCUMENT-START.
 | ||||
| func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, | ||||
| 	tag_directives []yaml_tag_directive_t, implicit bool) bool { | ||||
| func yaml_document_start_event_initialize( | ||||
| 	event *yaml_event_t, | ||||
| 	version_directive *yaml_version_directive_t, | ||||
| 	tag_directives []yaml_tag_directive_t, | ||||
| 	implicit bool, | ||||
| ) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ:               yaml_DOCUMENT_START_EVENT, | ||||
| 		version_directive: version_directive, | ||||
| 		tag_directives:    tag_directives, | ||||
| 		implicit:          implicit, | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Create DOCUMENT-END.
 | ||||
| func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { | ||||
| func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ:      yaml_DOCUMENT_END_EVENT, | ||||
| 		implicit: implicit, | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| ///*
 | ||||
|  | @ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { | |||
| } | ||||
| 
 | ||||
| // Create MAPPING-START.
 | ||||
| func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { | ||||
| func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ:      yaml_MAPPING_START_EVENT, | ||||
| 		anchor:   anchor, | ||||
|  | @ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte | |||
| 		implicit: implicit, | ||||
| 		style:    yaml_style_t(style), | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Create MAPPING-END.
 | ||||
| func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { | ||||
| func yaml_mapping_end_event_initialize(event *yaml_event_t) { | ||||
| 	*event = yaml_event_t{ | ||||
| 		typ: yaml_MAPPING_END_EVENT, | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Destroy an event object.
 | ||||
|  | @ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) { | |||
| //    } context
 | ||||
| //    tag_directive *yaml_tag_directive_t
 | ||||
| //
 | ||||
| //    context.error = YAML_NO_ERROR // Eliminate a compliler warning.
 | ||||
| //    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
 | ||||
| //
 | ||||
| //    assert(document) // Non-NULL document object is expected.
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -4,6 +4,7 @@ import ( | |||
| 	"encoding" | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
|  | @ -22,19 +23,22 @@ type node struct { | |||
| 	kind         int | ||||
| 	line, column int | ||||
| 	tag          string | ||||
| 	value        string | ||||
| 	implicit     bool | ||||
| 	children     []*node | ||||
| 	anchors      map[string]*node | ||||
| 	// For an alias node, alias holds the resolved alias.
 | ||||
| 	alias    *node | ||||
| 	value    string | ||||
| 	implicit bool | ||||
| 	children []*node | ||||
| 	anchors  map[string]*node | ||||
| } | ||||
| 
 | ||||
| // ----------------------------------------------------------------------------
 | ||||
| // Parser, produces a node tree out of a libyaml event stream.
 | ||||
| 
 | ||||
| type parser struct { | ||||
| 	parser yaml_parser_t | ||||
| 	event  yaml_event_t | ||||
| 	doc    *node | ||||
| 	parser   yaml_parser_t | ||||
| 	event    yaml_event_t | ||||
| 	doc      *node | ||||
| 	doneInit bool | ||||
| } | ||||
| 
 | ||||
| func newParser(b []byte) *parser { | ||||
|  | @ -42,21 +46,30 @@ func newParser(b []byte) *parser { | |||
| 	if !yaml_parser_initialize(&p.parser) { | ||||
| 		panic("failed to initialize YAML emitter") | ||||
| 	} | ||||
| 
 | ||||
| 	if len(b) == 0 { | ||||
| 		b = []byte{'\n'} | ||||
| 	} | ||||
| 
 | ||||
| 	yaml_parser_set_input_string(&p.parser, b) | ||||
| 
 | ||||
| 	p.skip() | ||||
| 	if p.event.typ != yaml_STREAM_START_EVENT { | ||||
| 		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) | ||||
| 	} | ||||
| 	p.skip() | ||||
| 	return &p | ||||
| } | ||||
| 
 | ||||
| func newParserFromReader(r io.Reader) *parser { | ||||
| 	p := parser{} | ||||
| 	if !yaml_parser_initialize(&p.parser) { | ||||
| 		panic("failed to initialize YAML emitter") | ||||
| 	} | ||||
| 	yaml_parser_set_input_reader(&p.parser, r) | ||||
| 	return &p | ||||
| } | ||||
| 
 | ||||
| func (p *parser) init() { | ||||
| 	if p.doneInit { | ||||
| 		return | ||||
| 	} | ||||
| 	p.expect(yaml_STREAM_START_EVENT) | ||||
| 	p.doneInit = true | ||||
| } | ||||
| 
 | ||||
| func (p *parser) destroy() { | ||||
| 	if p.event.typ != yaml_NO_EVENT { | ||||
| 		yaml_event_delete(&p.event) | ||||
|  | @ -64,16 +77,35 @@ func (p *parser) destroy() { | |||
| 	yaml_parser_delete(&p.parser) | ||||
| } | ||||
| 
 | ||||
| func (p *parser) skip() { | ||||
| 	if p.event.typ != yaml_NO_EVENT { | ||||
| 		if p.event.typ == yaml_STREAM_END_EVENT { | ||||
| 			failf("attempted to go past the end of stream; corrupted value?") | ||||
| // expect consumes an event from the event stream and
 | ||||
| // checks that it's of the expected type.
 | ||||
| func (p *parser) expect(e yaml_event_type_t) { | ||||
| 	if p.event.typ == yaml_NO_EVENT { | ||||
| 		if !yaml_parser_parse(&p.parser, &p.event) { | ||||
| 			p.fail() | ||||
| 		} | ||||
| 		yaml_event_delete(&p.event) | ||||
| 	} | ||||
| 	if p.event.typ == yaml_STREAM_END_EVENT { | ||||
| 		failf("attempted to go past the end of stream; corrupted value?") | ||||
| 	} | ||||
| 	if p.event.typ != e { | ||||
| 		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) | ||||
| 		p.fail() | ||||
| 	} | ||||
| 	yaml_event_delete(&p.event) | ||||
| 	p.event.typ = yaml_NO_EVENT | ||||
| } | ||||
| 
 | ||||
| // peek peeks at the next event in the event stream,
 | ||||
| // puts the results into p.event and returns the event type.
 | ||||
| func (p *parser) peek() yaml_event_type_t { | ||||
| 	if p.event.typ != yaml_NO_EVENT { | ||||
| 		return p.event.typ | ||||
| 	} | ||||
| 	if !yaml_parser_parse(&p.parser, &p.event) { | ||||
| 		p.fail() | ||||
| 	} | ||||
| 	return p.event.typ | ||||
| } | ||||
| 
 | ||||
| func (p *parser) fail() { | ||||
|  | @ -81,6 +113,10 @@ func (p *parser) fail() { | |||
| 	var line int | ||||
| 	if p.parser.problem_mark.line != 0 { | ||||
| 		line = p.parser.problem_mark.line | ||||
| 		// Scanner errors don't iterate line before returning error
 | ||||
| 		if p.parser.error == yaml_SCANNER_ERROR { | ||||
| 			line++ | ||||
| 		} | ||||
| 	} else if p.parser.context_mark.line != 0 { | ||||
| 		line = p.parser.context_mark.line | ||||
| 	} | ||||
|  | @ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) { | |||
| } | ||||
| 
 | ||||
| func (p *parser) parse() *node { | ||||
| 	switch p.event.typ { | ||||
| 	p.init() | ||||
| 	switch p.peek() { | ||||
| 	case yaml_SCALAR_EVENT: | ||||
| 		return p.scalar() | ||||
| 	case yaml_ALIAS_EVENT: | ||||
|  | @ -118,9 +155,8 @@ func (p *parser) parse() *node { | |||
| 		// Happens when attempting to decode an empty buffer.
 | ||||
| 		return nil | ||||
| 	default: | ||||
| 		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) | ||||
| 		panic("attempted to parse unknown event: " + p.event.typ.String()) | ||||
| 	} | ||||
| 	panic("unreachable") | ||||
| } | ||||
| 
 | ||||
| func (p *parser) node(kind int) *node { | ||||
|  | @ -135,19 +171,20 @@ func (p *parser) document() *node { | |||
| 	n := p.node(documentNode) | ||||
| 	n.anchors = make(map[string]*node) | ||||
| 	p.doc = n | ||||
| 	p.skip() | ||||
| 	p.expect(yaml_DOCUMENT_START_EVENT) | ||||
| 	n.children = append(n.children, p.parse()) | ||||
| 	if p.event.typ != yaml_DOCUMENT_END_EVENT { | ||||
| 		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) | ||||
| 	} | ||||
| 	p.skip() | ||||
| 	p.expect(yaml_DOCUMENT_END_EVENT) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
| func (p *parser) alias() *node { | ||||
| 	n := p.node(aliasNode) | ||||
| 	n.value = string(p.event.anchor) | ||||
| 	p.skip() | ||||
| 	n.alias = p.doc.anchors[n.value] | ||||
| 	if n.alias == nil { | ||||
| 		failf("unknown anchor '%s' referenced", n.value) | ||||
| 	} | ||||
| 	p.expect(yaml_ALIAS_EVENT) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
|  | @ -157,29 +194,29 @@ func (p *parser) scalar() *node { | |||
| 	n.tag = string(p.event.tag) | ||||
| 	n.implicit = p.event.implicit | ||||
| 	p.anchor(n, p.event.anchor) | ||||
| 	p.skip() | ||||
| 	p.expect(yaml_SCALAR_EVENT) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
| func (p *parser) sequence() *node { | ||||
| 	n := p.node(sequenceNode) | ||||
| 	p.anchor(n, p.event.anchor) | ||||
| 	p.skip() | ||||
| 	for p.event.typ != yaml_SEQUENCE_END_EVENT { | ||||
| 	p.expect(yaml_SEQUENCE_START_EVENT) | ||||
| 	for p.peek() != yaml_SEQUENCE_END_EVENT { | ||||
| 		n.children = append(n.children, p.parse()) | ||||
| 	} | ||||
| 	p.skip() | ||||
| 	p.expect(yaml_SEQUENCE_END_EVENT) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
| func (p *parser) mapping() *node { | ||||
| 	n := p.node(mappingNode) | ||||
| 	p.anchor(n, p.event.anchor) | ||||
| 	p.skip() | ||||
| 	for p.event.typ != yaml_MAPPING_END_EVENT { | ||||
| 	p.expect(yaml_MAPPING_START_EVENT) | ||||
| 	for p.peek() != yaml_MAPPING_END_EVENT { | ||||
| 		n.children = append(n.children, p.parse(), p.parse()) | ||||
| 	} | ||||
| 	p.skip() | ||||
| 	p.expect(yaml_MAPPING_END_EVENT) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
|  | @ -188,9 +225,10 @@ func (p *parser) mapping() *node { | |||
| 
 | ||||
| type decoder struct { | ||||
| 	doc     *node | ||||
| 	aliases map[string]bool | ||||
| 	aliases map[*node]bool | ||||
| 	mapType reflect.Type | ||||
| 	terrors []string | ||||
| 	strict  bool | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
|  | @ -198,11 +236,13 @@ var ( | |||
| 	durationType   = reflect.TypeOf(time.Duration(0)) | ||||
| 	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) | ||||
| 	ifaceType      = defaultMapType.Elem() | ||||
| 	timeType       = reflect.TypeOf(time.Time{}) | ||||
| 	ptrTimeType    = reflect.TypeOf(&time.Time{}) | ||||
| ) | ||||
| 
 | ||||
| func newDecoder() *decoder { | ||||
| 	d := &decoder{mapType: defaultMapType} | ||||
| 	d.aliases = make(map[string]bool) | ||||
| func newDecoder(strict bool) *decoder { | ||||
| 	d := &decoder{mapType: defaultMapType, strict: strict} | ||||
| 	d.aliases = make(map[*node]bool) | ||||
| 	return d | ||||
| } | ||||
| 
 | ||||
|  | @ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { | |||
| //
 | ||||
| // If n holds a null value, prepare returns before doing anything.
 | ||||
| func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { | ||||
| 	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { | ||||
| 	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { | ||||
| 		return out, false, false | ||||
| 	} | ||||
| 	again := true | ||||
|  | @ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) { | |||
| } | ||||
| 
 | ||||
| func (d *decoder) alias(n *node, out reflect.Value) (good bool) { | ||||
| 	an, ok := d.doc.anchors[n.value] | ||||
| 	if !ok { | ||||
| 		failf("unknown anchor '%s' referenced", n.value) | ||||
| 	} | ||||
| 	if d.aliases[n.value] { | ||||
| 	if d.aliases[n] { | ||||
| 		// TODO this could actually be allowed in some circumstances.
 | ||||
| 		failf("anchor '%s' value contains itself", n.value) | ||||
| 	} | ||||
| 	d.aliases[n.value] = true | ||||
| 	good = d.unmarshal(an, out) | ||||
| 	delete(d.aliases, n.value) | ||||
| 	d.aliases[n] = true | ||||
| 	good = d.unmarshal(n.alias, out) | ||||
| 	delete(d.aliases, n) | ||||
| 	return good | ||||
| } | ||||
| 
 | ||||
|  | @ -329,7 +366,7 @@ func resetMap(out reflect.Value) { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | ||||
| func (d *decoder) scalar(n *node, out reflect.Value) bool { | ||||
| 	var tag string | ||||
| 	var resolved interface{} | ||||
| 	if n.tag == "" && !n.implicit { | ||||
|  | @ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | |||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
| 	if s, ok := resolved.(string); ok && out.CanAddr() { | ||||
| 		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { | ||||
| 			err := u.UnmarshalText([]byte(s)) | ||||
| 	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { | ||||
| 		// We've resolved to exactly the type we want, so use that.
 | ||||
| 		out.Set(resolvedv) | ||||
| 		return true | ||||
| 	} | ||||
| 	// Perhaps we can use the value as a TextUnmarshaler to
 | ||||
| 	// set its value.
 | ||||
| 	if out.CanAddr() { | ||||
| 		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) | ||||
| 		if ok { | ||||
| 			var text []byte | ||||
| 			if tag == yaml_BINARY_TAG { | ||||
| 				text = []byte(resolved.(string)) | ||||
| 			} else { | ||||
| 				// We let any value be unmarshaled into TextUnmarshaler.
 | ||||
| 				// That might be more lax than we'd like, but the
 | ||||
| 				// TextUnmarshaler itself should bowl out any dubious values.
 | ||||
| 				text = []byte(n.value) | ||||
| 			} | ||||
| 			err := u.UnmarshalText(text) | ||||
| 			if err != nil { | ||||
| 				fail(err) | ||||
| 			} | ||||
|  | @ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | |||
| 	case reflect.String: | ||||
| 		if tag == yaml_BINARY_TAG { | ||||
| 			out.SetString(resolved.(string)) | ||||
| 			good = true | ||||
| 		} else if resolved != nil { | ||||
| 			return true | ||||
| 		} | ||||
| 		if resolved != nil { | ||||
| 			out.SetString(n.value) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		} | ||||
| 	case reflect.Interface: | ||||
| 		if resolved == nil { | ||||
| 			out.Set(reflect.Zero(out.Type())) | ||||
| 		} else if tag == yaml_TIMESTAMP_TAG { | ||||
| 			// It looks like a timestamp but for backward compatibility
 | ||||
| 			// reasons we set it as a string, so that code that unmarshals
 | ||||
| 			// timestamp-like values into interface{} will continue to
 | ||||
| 			// see a string and not a time.Time.
 | ||||
| 			// TODO(v3) Drop this.
 | ||||
| 			out.Set(reflect.ValueOf(n.value)) | ||||
| 		} else { | ||||
| 			out.Set(reflect.ValueOf(resolved)) | ||||
| 		} | ||||
| 		good = true | ||||
| 		return true | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||
| 		switch resolved := resolved.(type) { | ||||
| 		case int: | ||||
| 			if !out.OverflowInt(int64(resolved)) { | ||||
| 				out.SetInt(int64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case int64: | ||||
| 			if !out.OverflowInt(resolved) { | ||||
| 				out.SetInt(resolved) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case uint64: | ||||
| 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { | ||||
| 				out.SetInt(int64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case float64: | ||||
| 			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { | ||||
| 				out.SetInt(int64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case string: | ||||
| 			if out.Type() == durationType { | ||||
| 				d, err := time.ParseDuration(resolved) | ||||
| 				if err == nil { | ||||
| 					out.SetInt(int64(d)) | ||||
| 					good = true | ||||
| 					return true | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | @ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | |||
| 		case int: | ||||
| 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { | ||||
| 				out.SetUint(uint64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case int64: | ||||
| 			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { | ||||
| 				out.SetUint(uint64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case uint64: | ||||
| 			if !out.OverflowUint(uint64(resolved)) { | ||||
| 				out.SetUint(uint64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		case float64: | ||||
| 			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { | ||||
| 				out.SetUint(uint64(resolved)) | ||||
| 				good = true | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	case reflect.Bool: | ||||
| 		switch resolved := resolved.(type) { | ||||
| 		case bool: | ||||
| 			out.SetBool(resolved) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		} | ||||
| 	case reflect.Float32, reflect.Float64: | ||||
| 		switch resolved := resolved.(type) { | ||||
| 		case int: | ||||
| 			out.SetFloat(float64(resolved)) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		case int64: | ||||
| 			out.SetFloat(float64(resolved)) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		case uint64: | ||||
| 			out.SetFloat(float64(resolved)) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		case float64: | ||||
| 			out.SetFloat(resolved) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		} | ||||
| 	case reflect.Struct: | ||||
| 		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { | ||||
| 			out.Set(resolvedv) | ||||
| 			return true | ||||
| 		} | ||||
| 	case reflect.Ptr: | ||||
| 		if out.Type().Elem() == reflect.TypeOf(resolved) { | ||||
|  | @ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { | |||
| 			elem := reflect.New(out.Type().Elem()) | ||||
| 			elem.Elem().Set(reflect.ValueOf(resolved)) | ||||
| 			out.Set(elem) | ||||
| 			good = true | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	if !good { | ||||
| 		d.terror(n, tag, out) | ||||
| 	} | ||||
| 	return good | ||||
| 	d.terror(n, tag, out) | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| func settableValueOf(i interface{}) reflect.Value { | ||||
|  | @ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { | |||
| 	switch out.Kind() { | ||||
| 	case reflect.Slice: | ||||
| 		out.Set(reflect.MakeSlice(out.Type(), l, l)) | ||||
| 	case reflect.Array: | ||||
| 		if l != out.Len() { | ||||
| 			failf("invalid array: want %d elements but got %d", out.Len(), l) | ||||
| 		} | ||||
| 	case reflect.Interface: | ||||
| 		// No type hints. Will have to use a generic sequence.
 | ||||
| 		iface = out | ||||
|  | @ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { | |||
| 			j++ | ||||
| 		} | ||||
| 	} | ||||
| 	out.Set(out.Slice(0, j)) | ||||
| 	if out.Kind() != reflect.Array { | ||||
| 		out.Set(out.Slice(0, j)) | ||||
| 	} | ||||
| 	if iface.IsValid() { | ||||
| 		iface.Set(out) | ||||
| 	} | ||||
|  | @ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { | |||
| 			} | ||||
| 			e := reflect.New(et).Elem() | ||||
| 			if d.unmarshal(n.children[i+1], e) { | ||||
| 				out.SetMapIndex(k, e) | ||||
| 				d.setMapIndex(n.children[i+1], out, k, e) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | @ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { | |||
| 	return true | ||||
| } | ||||
| 
 | ||||
| func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { | ||||
| 	if d.strict && out.MapIndex(k) != zeroValue { | ||||
| 		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) | ||||
| 		return | ||||
| 	} | ||||
| 	out.SetMapIndex(k, v) | ||||
| } | ||||
| 
 | ||||
| func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { | ||||
| 	outt := out.Type() | ||||
| 	if outt.Elem() != mapItemType { | ||||
|  | @ -607,6 +686,19 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { | |||
| 	} | ||||
| 	name := settableValueOf("") | ||||
| 	l := len(n.children) | ||||
| 
 | ||||
| 	var inlineMap reflect.Value | ||||
| 	var elemType reflect.Type | ||||
| 	if sinfo.InlineMap != -1 { | ||||
| 		inlineMap = out.Field(sinfo.InlineMap) | ||||
| 		inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) | ||||
| 		elemType = inlineMap.Type().Elem() | ||||
| 	} | ||||
| 
 | ||||
| 	var doneFields []bool | ||||
| 	if d.strict { | ||||
| 		doneFields = make([]bool, len(sinfo.FieldsList)) | ||||
| 	} | ||||
| 	for i := 0; i < l; i += 2 { | ||||
| 		ni := n.children[i] | ||||
| 		if isMerge(ni) { | ||||
|  | @ -617,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { | |||
| 			continue | ||||
| 		} | ||||
| 		if info, ok := sinfo.FieldsMap[name.String()]; ok { | ||||
| 			if d.strict { | ||||
| 				if doneFields[info.Id] { | ||||
| 					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) | ||||
| 					continue | ||||
| 				} | ||||
| 				doneFields[info.Id] = true | ||||
| 			} | ||||
| 			var field reflect.Value | ||||
| 			if info.Inline == nil { | ||||
| 				field = out.Field(info.Num) | ||||
|  | @ -624,6 +723,15 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { | |||
| 				field = out.FieldByIndex(info.Inline) | ||||
| 			} | ||||
| 			d.unmarshal(n.children[i+1], field) | ||||
| 		} else if sinfo.InlineMap != -1 { | ||||
| 			if inlineMap.IsNil() { | ||||
| 				inlineMap.Set(reflect.MakeMap(inlineMap.Type())) | ||||
| 			} | ||||
| 			value := reflect.New(elemType).Elem() | ||||
| 			d.unmarshal(n.children[i+1], value) | ||||
| 			d.setMapIndex(n.children[i+1], inlineMap, name, value) | ||||
| 		} else if d.strict { | ||||
| 			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
|  |  | |||
|  | @ -2,6 +2,7 @@ package yaml | |||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| ) | ||||
| 
 | ||||
| // Flush the buffer if needed.
 | ||||
|  | @ -664,9 +665,8 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, | |||
| 		return yaml_emitter_emit_mapping_start(emitter, event) | ||||
| 	default: | ||||
| 		return yaml_emitter_set_emitter_error(emitter, | ||||
| 			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") | ||||
| 			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Expect ALIAS.
 | ||||
|  | @ -843,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event | |||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Write an achor.
 | ||||
| // Write an anchor.
 | ||||
| func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { | ||||
| 	if emitter.anchor_data.anchor == nil { | ||||
| 		return true | ||||
|  | @ -995,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { | |||
| 		break_space    = false | ||||
| 		space_break    = false | ||||
| 
 | ||||
| 		preceeded_by_whitespace = false | ||||
| 		followed_by_whitespace  = false | ||||
| 		previous_space          = false | ||||
| 		previous_break          = false | ||||
| 		preceded_by_whitespace = false | ||||
| 		followed_by_whitespace = false | ||||
| 		previous_space         = false | ||||
| 		previous_break         = false | ||||
| 	) | ||||
| 
 | ||||
| 	emitter.scalar_data.value = value | ||||
|  | @ -1017,9 +1017,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { | |||
| 		flow_indicators = true | ||||
| 	} | ||||
| 
 | ||||
| 	preceeded_by_whitespace = true | ||||
| 	preceded_by_whitespace = true | ||||
| 	for i, w := 0, 0; i < len(value); i += w { | ||||
| 		w = width(value[0]) | ||||
| 		w = width(value[i]) | ||||
| 		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) | ||||
| 
 | ||||
| 		if i == 0 { | ||||
|  | @ -1048,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { | |||
| 					block_indicators = true | ||||
| 				} | ||||
| 			case '#': | ||||
| 				if preceeded_by_whitespace { | ||||
| 				if preceded_by_whitespace { | ||||
| 					flow_indicators = true | ||||
| 					block_indicators = true | ||||
| 				} | ||||
|  | @ -1089,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { | |||
| 		} | ||||
| 
 | ||||
| 		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
 | ||||
| 		preceeded_by_whitespace = is_blankz(value, i) | ||||
| 		preceded_by_whitespace = is_blankz(value, i) | ||||
| 	} | ||||
| 
 | ||||
| 	emitter.scalar_data.multiline = line_breaks | ||||
|  |  | |||
|  | @ -2,12 +2,15 @@ package yaml | |||
| 
 | ||||
| import ( | ||||
| 	"encoding" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
| 
 | ||||
| type encoder struct { | ||||
|  | @ -15,25 +18,39 @@ type encoder struct { | |||
| 	event   yaml_event_t | ||||
| 	out     []byte | ||||
| 	flow    bool | ||||
| 	// doneInit holds whether the initial stream_start_event has been
 | ||||
| 	// emitted.
 | ||||
| 	doneInit bool | ||||
| } | ||||
| 
 | ||||
| func newEncoder() (e *encoder) { | ||||
| 	e = &encoder{} | ||||
| 	e.must(yaml_emitter_initialize(&e.emitter)) | ||||
| func newEncoder() *encoder { | ||||
| 	e := &encoder{} | ||||
| 	yaml_emitter_initialize(&e.emitter) | ||||
| 	yaml_emitter_set_output_string(&e.emitter, &e.out) | ||||
| 	yaml_emitter_set_unicode(&e.emitter, true) | ||||
| 	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) | ||||
| 	e.emit() | ||||
| 	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) | ||||
| 	e.emit() | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) finish() { | ||||
| 	e.must(yaml_document_end_event_initialize(&e.event, true)) | ||||
| func newEncoderWithWriter(w io.Writer) *encoder { | ||||
| 	e := &encoder{} | ||||
| 	yaml_emitter_initialize(&e.emitter) | ||||
| 	yaml_emitter_set_output_writer(&e.emitter, w) | ||||
| 	yaml_emitter_set_unicode(&e.emitter, true) | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) init() { | ||||
| 	if e.doneInit { | ||||
| 		return | ||||
| 	} | ||||
| 	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) | ||||
| 	e.emit() | ||||
| 	e.doneInit = true | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) finish() { | ||||
| 	e.emitter.open_ended = false | ||||
| 	e.must(yaml_stream_end_event_initialize(&e.event)) | ||||
| 	yaml_stream_end_event_initialize(&e.event) | ||||
| 	e.emit() | ||||
| } | ||||
| 
 | ||||
|  | @ -43,9 +60,7 @@ func (e *encoder) destroy() { | |||
| 
 | ||||
| func (e *encoder) emit() { | ||||
| 	// This will internally delete the e.event value.
 | ||||
| 	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { | ||||
| 		e.must(false) | ||||
| 	} | ||||
| 	e.must(yaml_emitter_emit(&e.emitter, &e.event)) | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) must(ok bool) { | ||||
|  | @ -58,13 +73,28 @@ func (e *encoder) must(ok bool) { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) marshalDoc(tag string, in reflect.Value) { | ||||
| 	e.init() | ||||
| 	yaml_document_start_event_initialize(&e.event, nil, nil, true) | ||||
| 	e.emit() | ||||
| 	e.marshal(tag, in) | ||||
| 	yaml_document_end_event_initialize(&e.event, true) | ||||
| 	e.emit() | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) marshal(tag string, in reflect.Value) { | ||||
| 	if !in.IsValid() { | ||||
| 	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { | ||||
| 		e.nilv() | ||||
| 		return | ||||
| 	} | ||||
| 	iface := in.Interface() | ||||
| 	if m, ok := iface.(Marshaler); ok { | ||||
| 	switch m := iface.(type) { | ||||
| 	case time.Time, *time.Time: | ||||
| 		// Although time.Time implements TextMarshaler,
 | ||||
| 		// we don't want to treat it as a string for YAML
 | ||||
| 		// purposes because YAML has special support for
 | ||||
| 		// timestamps.
 | ||||
| 	case Marshaler: | ||||
| 		v, err := m.MarshalYAML() | ||||
| 		if err != nil { | ||||
| 			fail(err) | ||||
|  | @ -74,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) { | |||
| 			return | ||||
| 		} | ||||
| 		in = reflect.ValueOf(v) | ||||
| 	} else if m, ok := iface.(encoding.TextMarshaler); ok { | ||||
| 	case encoding.TextMarshaler: | ||||
| 		text, err := m.MarshalText() | ||||
| 		if err != nil { | ||||
| 			fail(err) | ||||
| 		} | ||||
| 		in = reflect.ValueOf(string(text)) | ||||
| 	case nil: | ||||
| 		e.nilv() | ||||
| 		return | ||||
| 	} | ||||
| 	switch in.Kind() { | ||||
| 	case reflect.Interface: | ||||
| 		if in.IsNil() { | ||||
| 			e.nilv() | ||||
| 		} else { | ||||
| 			e.marshal(tag, in.Elem()) | ||||
| 		} | ||||
| 		e.marshal(tag, in.Elem()) | ||||
| 	case reflect.Map: | ||||
| 		e.mapv(tag, in) | ||||
| 	case reflect.Ptr: | ||||
| 		if in.IsNil() { | ||||
| 			e.nilv() | ||||
| 		if in.Type() == ptrTimeType { | ||||
| 			e.timev(tag, in.Elem()) | ||||
| 		} else { | ||||
| 			e.marshal(tag, in.Elem()) | ||||
| 		} | ||||
| 	case reflect.Struct: | ||||
| 		e.structv(tag, in) | ||||
| 	case reflect.Slice: | ||||
| 		if in.Type() == timeType { | ||||
| 			e.timev(tag, in) | ||||
| 		} else { | ||||
| 			e.structv(tag, in) | ||||
| 		} | ||||
| 	case reflect.Slice, reflect.Array: | ||||
| 		if in.Type().Elem() == mapItemType { | ||||
| 			e.itemsv(tag, in) | ||||
| 		} else { | ||||
|  | @ -164,6 +197,22 @@ func (e *encoder) structv(tag string, in reflect.Value) { | |||
| 			e.flow = info.Flow | ||||
| 			e.marshal("", value) | ||||
| 		} | ||||
| 		if sinfo.InlineMap >= 0 { | ||||
| 			m := in.Field(sinfo.InlineMap) | ||||
| 			if m.Len() > 0 { | ||||
| 				e.flow = false | ||||
| 				keys := keyList(m.MapKeys()) | ||||
| 				sort.Sort(keys) | ||||
| 				for _, k := range keys { | ||||
| 					if _, found := sinfo.FieldsMap[k.String()]; found { | ||||
| 						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) | ||||
| 					} | ||||
| 					e.marshal("", k) | ||||
| 					e.flow = false | ||||
| 					e.marshal("", m.MapIndex(k)) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
|  | @ -174,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) { | |||
| 		e.flow = false | ||||
| 		style = yaml_FLOW_MAPPING_STYLE | ||||
| 	} | ||||
| 	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) | ||||
| 	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) | ||||
| 	e.emit() | ||||
| 	f() | ||||
| 	e.must(yaml_mapping_end_event_initialize(&e.event)) | ||||
| 	yaml_mapping_end_event_initialize(&e.event) | ||||
| 	e.emit() | ||||
| } | ||||
| 
 | ||||
|  | @ -223,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0 | |||
| func (e *encoder) stringv(tag string, in reflect.Value) { | ||||
| 	var style yaml_scalar_style_t | ||||
| 	s := in.String() | ||||
| 	rtag, rs := resolve("", s) | ||||
| 	if rtag == yaml_BINARY_TAG { | ||||
| 		if tag == "" || tag == yaml_STR_TAG { | ||||
| 			tag = rtag | ||||
| 			s = rs.(string) | ||||
| 		} else if tag == yaml_BINARY_TAG { | ||||
| 	canUsePlain := true | ||||
| 	switch { | ||||
| 	case !utf8.ValidString(s): | ||||
| 		if tag == yaml_BINARY_TAG { | ||||
| 			failf("explicitly tagged !!binary data must be base64-encoded") | ||||
| 		} else { | ||||
| 		} | ||||
| 		if tag != "" { | ||||
| 			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) | ||||
| 		} | ||||
| 		// It can't be encoded directly as YAML so use a binary tag
 | ||||
| 		// and encode it as base64.
 | ||||
| 		tag = yaml_BINARY_TAG | ||||
| 		s = encodeBase64(s) | ||||
| 	case tag == "": | ||||
| 		// Check to see if it would resolve to a specific
 | ||||
| 		// tag when encoded unquoted. If it doesn't,
 | ||||
| 		// there's no need to quote it.
 | ||||
| 		rtag, _ := resolve("", s) | ||||
| 		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) | ||||
| 	} | ||||
| 	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { | ||||
| 		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE | ||||
| 	} else if strings.Contains(s, "\n") { | ||||
| 	// Note: it's possible for user code to emit invalid YAML
 | ||||
| 	// if they explicitly specify a tag and a string containing
 | ||||
| 	// text that's incompatible with that tag.
 | ||||
| 	switch { | ||||
| 	case strings.Contains(s, "\n"): | ||||
| 		style = yaml_LITERAL_SCALAR_STYLE | ||||
| 	} else { | ||||
| 	case canUsePlain: | ||||
| 		style = yaml_PLAIN_SCALAR_STYLE | ||||
| 	default: | ||||
| 		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE | ||||
| 	} | ||||
| 	e.emitScalar(s, "", tag, style) | ||||
| } | ||||
|  | @ -264,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) { | |||
| 	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) timev(tag string, in reflect.Value) { | ||||
| 	t := in.Interface().(time.Time) | ||||
| 	s := t.Format(time.RFC3339Nano) | ||||
| 	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) | ||||
| } | ||||
| 
 | ||||
| func (e *encoder) floatv(tag string, in reflect.Value) { | ||||
| 	// FIXME: Handle 64 bits here.
 | ||||
| 	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) | ||||
| 	// Issue #352: When formatting, use the precision of the underlying value
 | ||||
| 	precision := 64 | ||||
| 	if in.Kind() == reflect.Float32 { | ||||
| 		precision = 32 | ||||
| 	} | ||||
| 
 | ||||
| 	s := strconv.FormatFloat(in.Float(), 'g', -1, precision) | ||||
| 	switch s { | ||||
| 	case "+Inf": | ||||
| 		s = ".inf" | ||||
|  |  | |||
|  | @ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool | |||
| 	default: | ||||
| 		panic("invalid parser state") | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Parse the production:
 | ||||
|  |  | |||
|  | @ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { | |||
| 		panic("read handler must be set") | ||||
| 	} | ||||
| 
 | ||||
| 	// [Go] This function was changed to guarantee the requested length size at EOF.
 | ||||
| 	// The fact we need to do this is pretty awful, but the description above implies
 | ||||
| 	// for that to be the case, and there are tests 
 | ||||
| 
 | ||||
| 	// If the EOF flag is set and the raw buffer is empty, do nothing.
 | ||||
| 	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { | ||||
| 		return true | ||||
| 		// [Go] ACTUALLY! Read the documentation of this function above.
 | ||||
| 		// This is just broken. To return true, we need to have the
 | ||||
| 		// given length in the buffer. Not doing that means every single
 | ||||
| 		// check that calls this function to make sure the buffer has a
 | ||||
| 		// given length is Go) panicking; or C) accessing invalid memory.
 | ||||
| 		//return true
 | ||||
| 	} | ||||
| 
 | ||||
| 	// Return if the buffer contains enough characters.
 | ||||
|  | @ -247,7 +256,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { | |||
| 				if parser.encoding == yaml_UTF16LE_ENCODING { | ||||
| 					low, high = 0, 1 | ||||
| 				} else { | ||||
| 					high, low = 1, 0 | ||||
| 					low, high = 1, 0 | ||||
| 				} | ||||
| 
 | ||||
| 				// The UTF-16 encoding is not as simple as one might
 | ||||
|  | @ -357,23 +366,26 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { | |||
| 			if value <= 0x7F { | ||||
| 				// 0000 0000-0000 007F . 0xxxxxxx
 | ||||
| 				parser.buffer[buffer_len+0] = byte(value) | ||||
| 				buffer_len += 1 | ||||
| 			} else if value <= 0x7FF { | ||||
| 				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
 | ||||
| 				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) | ||||
| 				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) | ||||
| 				buffer_len += 2 | ||||
| 			} else if value <= 0xFFFF { | ||||
| 				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
 | ||||
| 				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) | ||||
| 				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) | ||||
| 				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) | ||||
| 				buffer_len += 3 | ||||
| 			} else { | ||||
| 				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
 | ||||
| 				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) | ||||
| 				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) | ||||
| 				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) | ||||
| 				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) | ||||
| 				buffer_len += 4 | ||||
| 			} | ||||
| 			buffer_len += width | ||||
| 
 | ||||
| 			parser.unread++ | ||||
| 		} | ||||
|  | @ -386,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { | |||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	// [Go] Read the documentation of this function above. To return true,
 | ||||
| 	// we need to have the given length in the buffer. Not doing that means
 | ||||
| 	// every single check that calls this function to make sure the buffer
 | ||||
| 	// has a given length is Go) panicking; or C) accessing invalid memory.
 | ||||
| 	// This happens here due to the EOF above breaking early.
 | ||||
| 	for buffer_len < length { | ||||
| 		parser.buffer[buffer_len] = 0 | ||||
| 		buffer_len++ | ||||
| 	} | ||||
| 	parser.buffer = parser.buffer[:buffer_len] | ||||
| 	return true | ||||
| } | ||||
|  |  | |||
|  | @ -3,9 +3,10 @@ package yaml | |||
| import ( | ||||
| 	"encoding/base64" | ||||
| 	"math" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"unicode/utf8" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| type resolveMapItem struct { | ||||
|  | @ -74,12 +75,14 @@ func longTag(tag string) string { | |||
| 
 | ||||
| func resolvableTag(tag string) bool { | ||||
| 	switch tag { | ||||
| 	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: | ||||
| 	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) | ||||
| 
 | ||||
| func resolve(tag string, in string) (rtag string, out interface{}) { | ||||
| 	if !resolvableTag(tag) { | ||||
| 		return tag, in | ||||
|  | @ -89,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { | |||
| 		switch tag { | ||||
| 		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: | ||||
| 			return | ||||
| 		case yaml_FLOAT_TAG: | ||||
| 			if rtag == yaml_INT_TAG { | ||||
| 				switch v := out.(type) { | ||||
| 				case int64: | ||||
| 					rtag = yaml_FLOAT_TAG | ||||
| 					out = float64(v) | ||||
| 					return | ||||
| 				case int: | ||||
| 					rtag = yaml_FLOAT_TAG | ||||
| 					out = float64(v) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) | ||||
| 	}() | ||||
|  | @ -122,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) { | |||
| 
 | ||||
| 		case 'D', 'S': | ||||
| 			// Int, float, or timestamp.
 | ||||
| 			// Only try values as a timestamp if the value is unquoted or there's an explicit
 | ||||
| 			// !!timestamp tag.
 | ||||
| 			if tag == "" || tag == yaml_TIMESTAMP_TAG { | ||||
| 				t, ok := parseTimestamp(in) | ||||
| 				if ok { | ||||
| 					return yaml_TIMESTAMP_TAG, t | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| 			plain := strings.Replace(in, "_", "", -1) | ||||
| 			intv, err := strconv.ParseInt(plain, 0, 64) | ||||
| 			if err == nil { | ||||
|  | @ -135,9 +160,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) { | |||
| 			if err == nil { | ||||
| 				return yaml_INT_TAG, uintv | ||||
| 			} | ||||
| 			floatv, err := strconv.ParseFloat(plain, 64) | ||||
| 			if err == nil { | ||||
| 				return yaml_FLOAT_TAG, floatv | ||||
| 			if yamlStyleFloat.MatchString(plain) { | ||||
| 				floatv, err := strconv.ParseFloat(plain, 64) | ||||
| 				if err == nil { | ||||
| 					return yaml_FLOAT_TAG, floatv | ||||
| 				} | ||||
| 			} | ||||
| 			if strings.HasPrefix(plain, "0b") { | ||||
| 				intv, err := strconv.ParseInt(plain[2:], 2, 64) | ||||
|  | @ -153,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) { | |||
| 					return yaml_INT_TAG, uintv | ||||
| 				} | ||||
| 			} else if strings.HasPrefix(plain, "-0b") { | ||||
| 				intv, err := strconv.ParseInt(plain[3:], 2, 64) | ||||
| 				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) | ||||
| 				if err == nil { | ||||
| 					if intv == int64(int(intv)) { | ||||
| 						return yaml_INT_TAG, -int(intv) | ||||
| 					if true || intv == int64(int(intv)) { | ||||
| 						return yaml_INT_TAG, int(intv) | ||||
| 					} else { | ||||
| 						return yaml_INT_TAG, -intv | ||||
| 						return yaml_INT_TAG, intv | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			// XXX Handle timestamps here.
 | ||||
| 
 | ||||
| 		default: | ||||
| 			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") | ||||
| 		} | ||||
| 	} | ||||
| 	if tag == yaml_BINARY_TAG { | ||||
| 		return yaml_BINARY_TAG, in | ||||
| 	} | ||||
| 	if utf8.ValidString(in) { | ||||
| 		return yaml_STR_TAG, in | ||||
| 	} | ||||
| 	return yaml_BINARY_TAG, encodeBase64(in) | ||||
| 	return yaml_STR_TAG, in | ||||
| } | ||||
| 
 | ||||
| // encodeBase64 encodes s as base64 that is broken up into multiple lines
 | ||||
|  | @ -201,3 +220,39 @@ func encodeBase64(s string) string { | |||
| 	} | ||||
| 	return string(out[:k]) | ||||
| } | ||||
| 
 | ||||
// allowedTimestampFormats lists the time.Parse layouts tried by
// parseTimestamp. This is a subset of the formats allowed by the regular
// expression defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
	"2006-1-2",                        // date only
	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
	// from the set of examples.
}
| 
 | ||||
| // parseTimestamp parses s as a timestamp string and
 | ||||
| // returns the timestamp and reports whether it succeeded.
 | ||||
| // Timestamp formats are defined at http://yaml.org/type/timestamp.html
 | ||||
| func parseTimestamp(s string) (time.Time, bool) { | ||||
| 	// TODO write code to check all the formats supported by
 | ||||
| 	// http://yaml.org/type/timestamp.html instead of using time.Parse.
 | ||||
| 
 | ||||
| 	// Quick check: all date formats start with YYYY-.
 | ||||
| 	i := 0 | ||||
| 	for ; i < len(s); i++ { | ||||
| 		if c := s[i]; c < '0' || c > '9' { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if i != 4 || i == len(s) || s[i] != '-' { | ||||
| 		return time.Time{}, false | ||||
| 	} | ||||
| 	for _, format := range allowedTimestampFormats { | ||||
| 		if t, err := time.Parse(format, s); err == nil { | ||||
| 			return t, true | ||||
| 		} | ||||
| 	} | ||||
| 	return time.Time{}, false | ||||
| } | ||||
|  |  | |||
|  | @ -9,7 +9,7 @@ import ( | |||
| // ************
 | ||||
| //
 | ||||
| // The following notes assume that you are familiar with the YAML specification
 | ||||
| // (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
 | ||||
| // (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
 | ||||
| // some cases we are less restrictive that it requires.
 | ||||
| //
 | ||||
| // The process of transforming a YAML stream into a sequence of events is
 | ||||
|  | @ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co | |||
| 	if directive { | ||||
| 		context = "while parsing a %TAG directive" | ||||
| 	} | ||||
| 	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") | ||||
| 	return yaml_parser_set_scanner_error(parser, context, context_mark, problem) | ||||
| } | ||||
| 
 | ||||
| func trace(args ...interface{}) func() { | ||||
|  | @ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { | |||
| 
 | ||||
| 	required := parser.flow_level == 0 && parser.indent == parser.mark.column | ||||
| 
 | ||||
| 	// A simple key is required only when it is the first token in the current
 | ||||
| 	// line.  Therefore it is always allowed.  But we add a check anyway.
 | ||||
| 	if required && !parser.simple_key_allowed { | ||||
| 		panic("should not happen") | ||||
| 	} | ||||
| 
 | ||||
| 	//
 | ||||
| 	// If the current position may start a simple key, save it.
 | ||||
| 	//
 | ||||
|  | @ -961,7 +955,7 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml | |||
| } | ||||
| 
 | ||||
| // Pop indentation levels from the indents stack until the current level
 | ||||
| // becomes less or equal to the column.  For each intendation level, append
 | ||||
| // becomes less or equal to the column.  For each indentation level, append
 | ||||
| // the BLOCK-END token.
 | ||||
| func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { | ||||
| 	// In the flow context, do nothing.
 | ||||
|  | @ -969,7 +963,7 @@ func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { | |||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	// Loop through the intendation levels in the stack.
 | ||||
| 	// Loop through the indentation levels in the stack.
 | ||||
| 	for parser.indent > column { | ||||
| 		// Create a token and append it to the queue.
 | ||||
| 		token := yaml_token_t{ | ||||
|  | @ -1546,7 +1540,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool | |||
| 		// Unknown directive.
 | ||||
| 	} else { | ||||
| 		yaml_parser_set_scanner_error(parser, "while scanning a directive", | ||||
| 			start_mark, "found uknown directive name") | ||||
| 			start_mark, "found unknown directive name") | ||||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
|  | @ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma | |||
| 	} else { | ||||
| 		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
 | ||||
| 		// directive, it's an error.  If it's a tag token, it must be a part of URI.
 | ||||
| 		if directive && !(s[0] == '!' && s[1] == 0) { | ||||
| 		if directive && string(s) != "!" { | ||||
| 			yaml_parser_set_scanner_tag_error(parser, directive, | ||||
| 				start_mark, "did not find expected '!'") | ||||
| 			return false | ||||
|  | @ -1959,6 +1953,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma | |||
| func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { | ||||
| 	//size_t length = head ? strlen((char *)head) : 0
 | ||||
| 	var s []byte | ||||
| 	hasTag := len(head) > 0 | ||||
| 
 | ||||
| 	// Copy the head if needed.
 | ||||
| 	//
 | ||||
|  | @ -2000,10 +1995,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte | |||
| 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | ||||
| 			return false | ||||
| 		} | ||||
| 		hasTag = true | ||||
| 	} | ||||
| 
 | ||||
| 	// Check if the tag is non-empty.
 | ||||
| 	if len(s) == 0 { | ||||
| 	if !hasTag { | ||||
| 		yaml_parser_set_scanner_tag_error(parser, directive, | ||||
| 			start_mark, "did not find expected tag URI") | ||||
| 		return false | ||||
|  | @ -2085,14 +2080,14 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l | |||
| 			return false | ||||
| 		} | ||||
| 		if is_digit(parser.buffer, parser.buffer_pos) { | ||||
| 			// Check that the intendation is greater than 0.
 | ||||
| 			// Check that the indentation is greater than 0.
 | ||||
| 			if parser.buffer[parser.buffer_pos] == '0' { | ||||
| 				yaml_parser_set_scanner_error(parser, "while scanning a block scalar", | ||||
| 					start_mark, "found an intendation indicator equal to 0") | ||||
| 					start_mark, "found an indentation indicator equal to 0") | ||||
| 				return false | ||||
| 			} | ||||
| 
 | ||||
| 			// Get the intendation level and eat the indicator.
 | ||||
| 			// Get the indentation level and eat the indicator.
 | ||||
| 			increment = as_digit(parser.buffer, parser.buffer_pos) | ||||
| 			skip(parser) | ||||
| 		} | ||||
|  | @ -2102,7 +2097,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l | |||
| 
 | ||||
| 		if parser.buffer[parser.buffer_pos] == '0' { | ||||
| 			yaml_parser_set_scanner_error(parser, "while scanning a block scalar", | ||||
| 				start_mark, "found an intendation indicator equal to 0") | ||||
| 				start_mark, "found an indentation indicator equal to 0") | ||||
| 			return false | ||||
| 		} | ||||
| 		increment = as_digit(parser.buffer, parser.buffer_pos) | ||||
|  | @ -2157,7 +2152,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l | |||
| 
 | ||||
| 	end_mark := parser.mark | ||||
| 
 | ||||
| 	// Set the intendation level if it was specified.
 | ||||
| 	// Set the indentation level if it was specified.
 | ||||
| 	var indent int | ||||
| 	if increment > 0 { | ||||
| 		if parser.indent >= 0 { | ||||
|  | @ -2217,7 +2212,7 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l | |||
| 
 | ||||
| 		leading_break = read_line(parser, leading_break) | ||||
| 
 | ||||
| 		// Eat the following intendation spaces and line breaks.
 | ||||
| 		// Eat the following indentation spaces and line breaks.
 | ||||
| 		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { | ||||
| 			return false | ||||
| 		} | ||||
|  | @ -2245,15 +2240,15 @@ func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, l | |||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // Scan intendation spaces and line breaks for a block scalar.  Determine the
 | ||||
| // intendation level if needed.
 | ||||
| // Scan indentation spaces and line breaks for a block scalar.  Determine the
 | ||||
| // indentation level if needed.
 | ||||
| func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { | ||||
| 	*end_mark = parser.mark | ||||
| 
 | ||||
| 	// Eat the intendation spaces and line breaks.
 | ||||
| 	// Eat the indentation spaces and line breaks.
 | ||||
| 	max_indent := 0 | ||||
| 	for { | ||||
| 		// Eat the intendation spaces.
 | ||||
| 		// Eat the indentation spaces.
 | ||||
| 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | ||||
| 			return false | ||||
| 		} | ||||
|  | @ -2267,10 +2262,10 @@ func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, br | |||
| 			max_indent = parser.mark.column | ||||
| 		} | ||||
| 
 | ||||
| 		// Check for a tab character messing the intendation.
 | ||||
| 		// Check for a tab character messing the indentation.
 | ||||
| 		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { | ||||
| 			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", | ||||
| 				start_mark, "found a tab character where an intendation space is expected") | ||||
| 				start_mark, "found a tab character where an indentation space is expected") | ||||
| 		} | ||||
| 
 | ||||
| 		// Have we found a non-empty line?
 | ||||
|  | @ -2474,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | ||||
| 			return false | ||||
| 		} | ||||
| 
 | ||||
| 		// Check if we are at the end of the scalar.
 | ||||
| 		if single { | ||||
| 			if parser.buffer[parser.buffer_pos] == '\'' { | ||||
|  | @ -2486,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si | |||
| 		} | ||||
| 
 | ||||
| 		// Consume blank characters.
 | ||||
| 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { | ||||
| 			return false | ||||
| 		} | ||||
| 
 | ||||
| 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { | ||||
| 			if is_blank(parser.buffer, parser.buffer_pos) { | ||||
| 				// Consume a space or a tab character.
 | ||||
|  | @ -2591,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b | |||
| 		// Consume non-blank characters.
 | ||||
| 		for !is_blankz(parser.buffer, parser.buffer_pos) { | ||||
| 
 | ||||
| 			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
 | ||||
| 			if parser.flow_level > 0 && | ||||
| 				parser.buffer[parser.buffer_pos] == ':' && | ||||
| 				!is_blankz(parser.buffer, parser.buffer_pos+1) { | ||||
| 				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", | ||||
| 					start_mark, "found unexpected ':'") | ||||
| 				return false | ||||
| 			} | ||||
| 
 | ||||
| 			// Check for indicators that may end a plain scalar.
 | ||||
| 			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || | ||||
| 				(parser.flow_level > 0 && | ||||
| 					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || | ||||
| 					(parser.buffer[parser.buffer_pos] == ',' || | ||||
| 						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || | ||||
| 						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || | ||||
| 						parser.buffer[parser.buffer_pos] == '}')) { | ||||
|  | @ -2655,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b | |||
| 		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { | ||||
| 			if is_blank(parser.buffer, parser.buffer_pos) { | ||||
| 
 | ||||
| 				// Check for tab character that abuse intendation.
 | ||||
| 				// Check for tab characters that abuse indentation.
 | ||||
| 				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { | ||||
| 					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", | ||||
| 						start_mark, "found a tab character that violate intendation") | ||||
| 						start_mark, "found a tab character that violates indentation") | ||||
| 					return false | ||||
| 				} | ||||
| 
 | ||||
|  | @ -2687,7 +2673,7 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b | |||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// Check intendation level.
 | ||||
| 		// Check indentation level.
 | ||||
| 		if parser.flow_level == 0 && parser.mark.column < indent { | ||||
| 			break | ||||
| 		} | ||||
|  |  | |||
|  | @ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { | |||
| 		} | ||||
| 		var ai, bi int | ||||
| 		var an, bn int64 | ||||
| 		if ar[i] == '0' || br[i] == '0' { | ||||
| 			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { | ||||
| 				if ar[j] != '0' { | ||||
| 					an = 1 | ||||
| 					bn = 1 | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { | ||||
| 			an = an*10 + int64(ar[ai]-'0') | ||||
| 		} | ||||
|  |  | |||
|  | @ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool { | |||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	// If the output encoding is UTF-8, we don't need to recode the buffer.
 | ||||
| 	if emitter.encoding == yaml_UTF8_ENCODING { | ||||
| 		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { | ||||
| 			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) | ||||
| 		} | ||||
| 		emitter.buffer_pos = 0 | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	// Recode the buffer into the raw buffer.
 | ||||
| 	var low, high int | ||||
| 	if emitter.encoding == yaml_UTF16LE_ENCODING { | ||||
| 		low, high = 0, 1 | ||||
| 	} else { | ||||
| 		high, low = 1, 0 | ||||
| 	} | ||||
| 
 | ||||
| 	pos := 0 | ||||
| 	for pos < emitter.buffer_pos { | ||||
| 		// See the "reader.c" code for more details on UTF-8 encoding.  Note
 | ||||
| 		// that we assume that the buffer contains a valid UTF-8 sequence.
 | ||||
| 
 | ||||
| 		// Read the next UTF-8 character.
 | ||||
| 		octet := emitter.buffer[pos] | ||||
| 
 | ||||
| 		var w int | ||||
| 		var value rune | ||||
| 		switch { | ||||
| 		case octet&0x80 == 0x00: | ||||
| 			w, value = 1, rune(octet&0x7F) | ||||
| 		case octet&0xE0 == 0xC0: | ||||
| 			w, value = 2, rune(octet&0x1F) | ||||
| 		case octet&0xF0 == 0xE0: | ||||
| 			w, value = 3, rune(octet&0x0F) | ||||
| 		case octet&0xF8 == 0xF0: | ||||
| 			w, value = 4, rune(octet&0x07) | ||||
| 		} | ||||
| 		for k := 1; k < w; k++ { | ||||
| 			octet = emitter.buffer[pos+k] | ||||
| 			value = (value << 6) + (rune(octet) & 0x3F) | ||||
| 		} | ||||
| 		pos += w | ||||
| 
 | ||||
| 		// Write the character.
 | ||||
| 		if value < 0x10000 { | ||||
| 			var b [2]byte | ||||
| 			b[high] = byte(value >> 8) | ||||
| 			b[low] = byte(value & 0xFF) | ||||
| 			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) | ||||
| 		} else { | ||||
| 			// Write the character using a surrogate pair (check "reader.c").
 | ||||
| 			var b [4]byte | ||||
| 			value -= 0x10000 | ||||
| 			b[high] = byte(0xD8 + (value >> 18)) | ||||
| 			b[low] = byte((value >> 10) & 0xFF) | ||||
| 			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) | ||||
| 			b[low+2] = byte(value & 0xFF) | ||||
| 			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Write the raw buffer.
 | ||||
| 	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { | ||||
| 	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { | ||||
| 		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) | ||||
| 	} | ||||
| 	emitter.buffer_pos = 0 | ||||
| 	emitter.raw_buffer = emitter.raw_buffer[:0] | ||||
| 	return true | ||||
| } | ||||
|  |  | |||
|  | @ -9,6 +9,7 @@ package yaml | |||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | @ -77,8 +78,65 @@ type Marshaler interface { | |||
| // supported tag options.
 | ||||
| //
 | ||||
func Unmarshal(in []byte, out interface{}) (err error) {
	// Non-strict decoding: unknown fields and duplicate mapping keys are
	// tolerated (compare UnmarshalStrict).
	return unmarshal(in, out, false)
}
| 
 | ||||
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
	return unmarshal(in, out, true)
}
| 
 | ||||
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
	strict bool    // report unknown fields and duplicate keys as errors (see SetStrict)
	parser *parser // streaming parser over the underlying reader
}
| 
 | ||||
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
//
// Strict decoding is disabled by default; see SetStrict.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{
		parser: newParserFromReader(r),
	}
}
| 
 | ||||
| // SetStrict sets whether strict decoding behaviour is enabled when
 | ||||
| // decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
 | ||||
| func (dec *Decoder) SetStrict(strict bool) { | ||||
| 	dec.strict = strict | ||||
| } | ||||
| 
 | ||||
| // Decode reads the next YAML-encoded value from its input
 | ||||
| // and stores it in the value pointed to by v.
 | ||||
| //
 | ||||
| // See the documentation for Unmarshal for details about the
 | ||||
| // conversion of YAML into a Go value.
 | ||||
| func (dec *Decoder) Decode(v interface{}) (err error) { | ||||
| 	d := newDecoder(dec.strict) | ||||
| 	defer handleErr(&err) | ||||
| 	d := newDecoder() | ||||
| 	node := dec.parser.parse() | ||||
| 	if node == nil { | ||||
| 		return io.EOF | ||||
| 	} | ||||
| 	out := reflect.ValueOf(v) | ||||
| 	if out.Kind() == reflect.Ptr && !out.IsNil() { | ||||
| 		out = out.Elem() | ||||
| 	} | ||||
| 	d.unmarshal(node, out) | ||||
| 	if len(d.terrors) > 0 { | ||||
| 		return &TypeError{d.terrors} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func unmarshal(in []byte, out interface{}, strict bool) (err error) { | ||||
| 	defer handleErr(&err) | ||||
| 	d := newDecoder(strict) | ||||
| 	p := newParser(in) | ||||
| 	defer p.destroy() | ||||
| 	node := p.parse() | ||||
|  | @ -99,8 +157,8 @@ func Unmarshal(in []byte, out interface{}) (err error) { | |||
| // of the generated document will reflect the structure of the value itself.
 | ||||
| // Maps and pointers (to struct, string, int, etc) are accepted as the in value.
 | ||||
| //
 | ||||
| // Struct fields are only unmarshalled if they are exported (have an upper case
 | ||||
| // first letter), and are unmarshalled using the field name lowercased as the
 | ||||
| // Struct fields are only marshalled if they are exported (have an upper case
 | ||||
| // first letter), and are marshalled using the field name lowercased as the
 | ||||
| // default key. Custom keys may be defined via the "yaml" name in the field
 | ||||
| // tag: the content preceding the first comma is used as the key, and the
 | ||||
| // following comma-separated options are used to tweak the marshalling process.
 | ||||
|  | @ -114,21 +172,25 @@ func Unmarshal(in []byte, out interface{}) (err error) { | |||
| //
 | ||||
| //     omitempty    Only include the field if it's not set to the zero
 | ||||
| //                  value for the type or to empty slices or maps.
 | ||||
| //                  Does not apply to zero valued structs.
 | ||||
| //                  Zero valued structs will be omitted if all their public
 | ||||
| //                  fields are zero, unless they implement an IsZero
 | ||||
| //                  method (see the IsZeroer interface type), in which
 | ||||
| //                  case the field will be included if that method returns true.
 | ||||
| //
 | ||||
| //     flow         Marshal using a flow style (useful for structs,
 | ||||
| //                  sequences and maps.
 | ||||
| //                  sequences and maps).
 | ||||
| //
 | ||||
| //     inline       Inline the struct it's applied to, so its fields
 | ||||
| //                  are processed as if they were part of the outer
 | ||||
| //                  struct.
 | ||||
| //     inline       Inline the field, which must be a struct or a map,
 | ||||
| //                  causing all of its fields or keys to be processed as if
 | ||||
| //                  they were part of the outer struct. For maps, keys must
 | ||||
| //                  not conflict with the yaml keys of other struct fields.
 | ||||
| //
 | ||||
| // In addition, if the key is "-", the field is ignored.
 | ||||
| //
 | ||||
| // For example:
 | ||||
| //
 | ||||
| //     type T struct {
 | ||||
| //         F int "a,omitempty"
 | ||||
| //         F int `yaml:"a,omitempty"`
 | ||||
| //         B int
 | ||||
| //     }
 | ||||
| //     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
 | ||||
|  | @ -138,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) { | |||
| 	defer handleErr(&err) | ||||
| 	e := newEncoder() | ||||
| 	defer e.destroy() | ||||
| 	e.marshal("", reflect.ValueOf(in)) | ||||
| 	e.marshalDoc("", reflect.ValueOf(in)) | ||||
| 	e.finish() | ||||
| 	out = e.out | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| // An Encoder writes YAML values to an output stream.
 | ||||
| type Encoder struct { | ||||
| 	encoder *encoder | ||||
| } | ||||
| 
 | ||||
| // NewEncoder returns a new encoder that writes to w.
 | ||||
| // The Encoder should be closed after use to flush all data
 | ||||
| // to w.
 | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	return &Encoder{ | ||||
| 		encoder: newEncoderWithWriter(w), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Encode writes the YAML encoding of v to the stream.
 | ||||
| // If multiple items are encoded to the stream, the
 | ||||
| // second and subsequent document will be preceded
 | ||||
| // with a "---" document separator, but the first will not.
 | ||||
| //
 | ||||
| // See the documentation for Marshal for details about the conversion of Go
 | ||||
| // values to YAML.
 | ||||
| func (e *Encoder) Encode(v interface{}) (err error) { | ||||
| 	defer handleErr(&err) | ||||
| 	e.encoder.marshalDoc("", reflect.ValueOf(v)) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // Close closes the encoder by writing any remaining data.
 | ||||
| // It does not write a stream terminating string "...".
 | ||||
| func (e *Encoder) Close() (err error) { | ||||
| 	defer handleErr(&err) | ||||
| 	e.encoder.finish() | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func handleErr(err *error) { | ||||
| 	if v := recover(); v != nil { | ||||
| 		if e, ok := v.(yamlError); ok { | ||||
|  | @ -199,6 +296,9 @@ type fieldInfo struct { | |||
| 	Num       int | ||||
| 	OmitEmpty bool | ||||
| 	Flow      bool | ||||
| 	// Id holds the unique field identifier, so we can cheaply
 | ||||
| 	// check for field duplicates without maintaining an extra map.
 | ||||
| 	Id int | ||||
| 
 | ||||
| 	// Inline holds the field index if the field is part of an inlined struct.
 | ||||
| 	Inline []int | ||||
|  | @ -221,7 +321,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { | |||
| 	inlineMap := -1 | ||||
| 	for i := 0; i != n; i++ { | ||||
| 		field := st.Field(i) | ||||
| 		if field.PkgPath != "" { | ||||
| 		if field.PkgPath != "" && !field.Anonymous { | ||||
| 			continue // Private field
 | ||||
| 		} | ||||
| 
 | ||||
|  | @ -255,15 +355,14 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { | |||
| 
 | ||||
| 		if inline { | ||||
| 			switch field.Type.Kind() { | ||||
| 			// TODO: Implement support for inline maps.
 | ||||
| 			//case reflect.Map:
 | ||||
| 			//	if inlineMap >= 0 {
 | ||||
| 			//		return nil, errors.New("Multiple ,inline maps in struct " + st.String())
 | ||||
| 			//	}
 | ||||
| 			//	if field.Type.Key() != reflect.TypeOf("") {
 | ||||
| 			//		return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
 | ||||
| 			//	}
 | ||||
| 			//	inlineMap = info.Num
 | ||||
| 			case reflect.Map: | ||||
| 				if inlineMap >= 0 { | ||||
| 					return nil, errors.New("Multiple ,inline maps in struct " + st.String()) | ||||
| 				} | ||||
| 				if field.Type.Key() != reflect.TypeOf("") { | ||||
| 					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) | ||||
| 				} | ||||
| 				inlineMap = info.Num | ||||
| 			case reflect.Struct: | ||||
| 				sinfo, err := getStructInfo(field.Type) | ||||
| 				if err != nil { | ||||
|  | @ -279,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { | |||
| 					} else { | ||||
| 						finfo.Inline = append([]int{i}, finfo.Inline...) | ||||
| 					} | ||||
| 					finfo.Id = len(fieldsList) | ||||
| 					fieldsMap[finfo.Key] = finfo | ||||
| 					fieldsList = append(fieldsList, finfo) | ||||
| 				} | ||||
|  | @ -300,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { | |||
| 			return nil, errors.New(msg) | ||||
| 		} | ||||
| 
 | ||||
| 		info.Id = len(fieldsList) | ||||
| 		fieldsList = append(fieldsList, info) | ||||
| 		fieldsMap[info.Key] = info | ||||
| 	} | ||||
| 
 | ||||
| 	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} | ||||
| 	sinfo = &structInfo{ | ||||
| 		FieldsMap:  fieldsMap, | ||||
| 		FieldsList: fieldsList, | ||||
| 		InlineMap:  inlineMap, | ||||
| 	} | ||||
| 
 | ||||
| 	fieldMapMutex.Lock() | ||||
| 	structMap[st] = sinfo | ||||
|  | @ -312,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { | |||
| 	return sinfo, nil | ||||
| } | ||||
| 
 | ||||
// IsZeroer is used to check whether an object is zero to determine
// whether it should be omitted when marshaling with the omitempty
// flag. One notable implementation is time.Time.
type IsZeroer interface {
	IsZero() bool
}
| 
 | ||||
| func isZero(v reflect.Value) bool { | ||||
| 	switch v.Kind() { | ||||
| 	kind := v.Kind() | ||||
| 	if z, ok := v.Interface().(IsZeroer); ok { | ||||
| 		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { | ||||
| 			return true | ||||
| 		} | ||||
| 		return z.IsZero() | ||||
| 	} | ||||
| 	switch kind { | ||||
| 	case reflect.String: | ||||
| 		return len(v.String()) == 0 | ||||
| 	case reflect.Interface, reflect.Ptr: | ||||
|  | @ -324,13 +444,15 @@ func isZero(v reflect.Value) bool { | |||
| 		return v.Len() == 0 | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||
| 		return v.Int() == 0 | ||||
| 	case reflect.Float32, reflect.Float64: | ||||
| 		return v.Float() == 0 | ||||
| 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: | ||||
| 		return v.Uint() == 0 | ||||
| 	case reflect.Bool: | ||||
| 		return !v.Bool() | ||||
| 	case reflect.Struct: | ||||
| 		vt := v.Type() | ||||
| 		for i := v.NumField()-1; i >= 0; i-- { | ||||
| 		for i := v.NumField() - 1; i >= 0; i-- { | ||||
| 			if vt.Field(i).PkgPath != "" { | ||||
| 				continue // Private field
 | ||||
| 			} | ||||
|  |  | |||
|  | @ -1,6 +1,7 @@ | |||
| package yaml | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
| 
 | ||||
|  | @ -239,6 +240,27 @@ const ( | |||
| 	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
 | ||||
| ) | ||||
| 
 | ||||
| var eventStrings = []string{ | ||||
| 	yaml_NO_EVENT:             "none", | ||||
| 	yaml_STREAM_START_EVENT:   "stream start", | ||||
| 	yaml_STREAM_END_EVENT:     "stream end", | ||||
| 	yaml_DOCUMENT_START_EVENT: "document start", | ||||
| 	yaml_DOCUMENT_END_EVENT:   "document end", | ||||
| 	yaml_ALIAS_EVENT:          "alias", | ||||
| 	yaml_SCALAR_EVENT:         "scalar", | ||||
| 	yaml_SEQUENCE_START_EVENT: "sequence start", | ||||
| 	yaml_SEQUENCE_END_EVENT:   "sequence end", | ||||
| 	yaml_MAPPING_START_EVENT:  "mapping start", | ||||
| 	yaml_MAPPING_END_EVENT:    "mapping end", | ||||
| } | ||||
| 
 | ||||
| func (e yaml_event_type_t) String() string { | ||||
| 	if e < 0 || int(e) >= len(eventStrings) { | ||||
| 		return fmt.Sprintf("unknown event %d", e) | ||||
| 	} | ||||
| 	return eventStrings[e] | ||||
| } | ||||
| 
 | ||||
| // The event structure.
 | ||||
| type yaml_event_t struct { | ||||
| 
 | ||||
|  | @ -508,7 +530,7 @@ type yaml_parser_t struct { | |||
| 
 | ||||
| 	problem string // Error description.
 | ||||
| 
 | ||||
| 	// The byte about which the problem occured.
 | ||||
| 	// The byte about which the problem occurred.
 | ||||
| 	problem_offset int | ||||
| 	problem_value  int | ||||
| 	problem_mark   yaml_mark_t | ||||
|  | @ -521,9 +543,9 @@ type yaml_parser_t struct { | |||
| 
 | ||||
| 	read_handler yaml_read_handler_t // Read handler.
 | ||||
| 
 | ||||
| 	input_file io.Reader // File input data.
 | ||||
| 	input      []byte    // String input data.
 | ||||
| 	input_pos  int | ||||
| 	input_reader io.Reader // File input data.
 | ||||
| 	input        []byte    // String input data.
 | ||||
| 	input_pos    int | ||||
| 
 | ||||
| 	eof bool // EOF flag
 | ||||
| 
 | ||||
|  | @ -632,7 +654,7 @@ type yaml_emitter_t struct { | |||
| 	write_handler yaml_write_handler_t // Write handler.
 | ||||
| 
 | ||||
| 	output_buffer *[]byte   // String output data.
 | ||||
| 	output_file   io.Writer // File output data.
 | ||||
| 	output_writer io.Writer // File output data.
 | ||||
| 
 | ||||
| 	buffer     []byte // The working buffer.
 | ||||
| 	buffer_pos int    // The current position of the buffer.
 | ||||
|  |  | |||
		Loading…
	
		Reference in New Issue