diff --git a/app.manifest b/app.manifest
index 6134c8d..1690808 100644
--- a/app.manifest
+++ b/app.manifest
@@ -5,7 +5,7 @@
"id": {
"group": null,
"name": "TA-dmarc",
- "version": "4.1.0"
+ "version": "4.1.1"
},
"author": [
{
@@ -50,4 +50,4 @@
"Enterprise": "*"
}
}
-}
\ No newline at end of file
+}
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/__init__.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/__init__.py
index c437b0e..1f9fc68 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/__init__.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/__init__.py
@@ -15,6 +15,21 @@
"""Python library for Splunk."""
from __future__ import absolute_import
-from .six.moves import map
-__version_info__ = (1, 6, 6)
+from splunklib.six.moves import map
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
+
+__version_info__ = (1, 6, 20)
__version__ = ".".join(map(str, __version_info__))
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/binding.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/binding.py
index 8bfa28d..bb2771d 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/binding.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/binding.py
@@ -31,6 +31,7 @@
import socket
import ssl
import sys
+import time
from base64 import b64encode
from contextlib import contextmanager
from datetime import datetime
@@ -38,9 +39,8 @@
from io import BytesIO
from xml.etree.ElementTree import XML
-from . import six
-from .six import StringIO
-from .six.moves import urllib
+from splunklib import six
+from splunklib.six.moves import urllib
from .data import record
@@ -49,6 +49,7 @@
except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -70,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -80,6 +81,7 @@ def _parse_cookies(cookie_str, dictionary):
then updates the dictionary with any key-value pairs found.
**Example**::
+
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
@@ -295,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -449,8 +450,16 @@ class Context(object):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param splunkToken: Splunk authentication token
+ :type splunkToken: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
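# Illustrative usage (editor's sketch, not part of the diff): the new retries /
# retryDelay options documented above. Host and credentials are placeholders.
import splunklib.binding as binding

context = binding.connect(
    host="localhost", port=8089,
    username="admin", password="changeme",   # placeholder credentials
    retries=3,       # retry each failed HTTP request up to 3 times
    retryDelay=10)   # wait 10 seconds between attempts; blocks the calling thread
response = context.get("/services/server/info")
print(response.status)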
@@ -468,7 +477,8 @@ class Context(object):
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
- cert_file=kwargs.get("cert_file")) # Default to False for backward compat
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
@@ -480,6 +490,7 @@ def __init__(self, handler=None, **kwargs):
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.basic = kwargs.get("basic", False)
+ self.bearerToken = kwargs.get("splunkToken", "")
self.autologin = kwargs.get("autologin", False)
self.additional_headers = kwargs.get("headers", [])
@@ -496,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+ """Returns true if the ``HttpLib`` member of this instance has auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+ :return: ``True`` if an auth token is present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -520,6 +531,9 @@ def _auth_headers(self):
elif self.basic and (self.username and self.password):
token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii')
return [("Authorization", token)]
+ elif self.bearerToken:
+ token = 'Bearer %s' % self.bearerToken
+ return [("Authorization", token)]
elif self.token is _NoAuthenticationToken:
return []
else:
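# Illustrative usage (editor's sketch, not part of the diff): the splunkToken
# support added above sends a pre-created Splunk authentication token as a
# Bearer header, so no username/password is needed and login() is a no-op.
# The token value is a placeholder.
import splunklib.client as client

service = client.connect(
    host="localhost", port=8089,
    splunkToken="<your-authentication-token>")
print(service.info["version"])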
@@ -611,7 +625,7 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@@ -674,7 +688,7 @@ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.get(path, all_headers, **query)
return response
@@ -717,7 +731,12 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
- :type query: ``string``
+ :param body: Parameters to be used in the post body. If specified,
+ any parameters in the query will be applied to the URL instead of
+ the body. If a dict is supplied, the key-value pairs will be form
+ encoded. If a string is supplied, the body will be passed through
+ in the request unchanged.
+ :type body: ``dict`` or ``str``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -747,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -783,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -814,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
+
all_headers = headers + self.additional_headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
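# Illustrative usage (editor's sketch, not part of the diff): request() now takes
# a dict body. For GET the encoded pairs are appended to the URL; for other
# methods they are form-encoded into the request body. Assumes `context` is an
# authenticated splunklib.binding.Context.
response = context.request(
    "search/jobs",
    method="POST",
    body={"search": "search index=_internal | head 5", "exec_mode": "normal"})
print(response.status)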
@@ -862,6 +899,10 @@ def login(self):
# as credentials were passed in.
return
+ if self.bearerToken:
+ # Bearer auth mode requested, so this method is a nop as long
+ # as an authentication token was passed in.
+ return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
@@ -1004,7 +1045,7 @@ class HTTPError(Exception):
def __init__(self, response, _message=None):
status = response.status
reason = response.reason
- body = (response.body.read()).decode()
+ body = response.body.read()
try:
detail = XML(body).findtext("./messages/msg")
except ParseError as err:
@@ -1054,7 +1095,7 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
@@ -1121,12 +1162,14 @@ class HttpLib(object):
If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None):
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
if custom_handler is None:
- self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file)
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
else:
self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1212,6 +1255,8 @@ def post(self, url, headers=None, **kwargs):
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
body = kwargs.pop('body')
+ if isinstance(body, dict):
+ body = _encode(**body).encode('utf-8')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
@@ -1238,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1274,7 +1328,10 @@ def __init__(self, response, connection=None):
self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
@@ -1333,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None, verify=False):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1345,6 +1402,8 @@ def handler(key_file=None, cert_file=None, timeout=None, verify=False):
:type timeout: ``integer`` or "None"
:param `verify`: Set to False to disable SSL verification on https connections.
:type verify: ``Boolean``
+ :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True and a context is specified
+ :type context: ``SSLContext``
"""
def connect(scheme, host, port):
@@ -1356,9 +1415,12 @@ def connect(scheme, host, port):
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
- # If running Python 2.7.9+, disable SSL certificate validation
- if (sys.version_info >= (2,7,9) and key_file is None and cert_file is None) and not verify:
+ if not verify:
kwargs['context'] = ssl._create_unverified_context()
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
@@ -1368,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.6",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/client.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/client.py
index cb04093..35d9e4f 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/client.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/client.py
@@ -66,8 +66,8 @@
from datetime import datetime, timedelta
from time import sleep
-from . import six
-from .six.moves import urllib
+from splunklib import six
+from splunklib.six.moves import urllib
from . import data
from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded,
@@ -75,6 +75,8 @@
namespace)
from .data import record
+logger = logging.getLogger(__name__)
+
__all__ = [
"connect",
"NotSupportedError",
@@ -224,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
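# Illustrative usage (editor's sketch, not part of the diff): _load_sid() now
# reads the sid from a JSON response, so jobs can be created with
# output_mode="json". Assumes `service` is a connected splunklib.client.Service.
job = service.jobs.create("search index=_internal | head 5", output_mode="json")
print(job.sid)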
@@ -295,7 +300,7 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
@@ -318,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -365,7 +377,7 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
@@ -384,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -401,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -463,6 +481,13 @@ def info(self):
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
+ def input(self, path, kind=None):
+ """Retrieves an input by path, and optionally kind.
+
+ :return: A :class:`Input` object.
+ """
+ return Input(self, path, kind=kind).refresh()
+
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
@@ -666,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+ By default, if the kvstore owner is not set, it will return "nobody".
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ The kvstore is refreshed when the owner value is changed.
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+ Sets the owner for the namespace before retrieving the KVStore Collection.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
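# Illustrative usage (editor's sketch, not part of the diff): KV Store requests
# are now issued in the namespace of service.kvstore_owner (default "nobody").
# Assumes `service` is a connected Service; the collection name is a placeholder.
service.kvstore_owner = "nobody"            # setter also refreshes the kvstore namespace
collection = service.kvstore["mycollection"]
print(collection.name)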
@@ -692,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -750,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -810,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -821,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -946,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1052,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1200,7 +1241,7 @@ def __getitem__(self, key):
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
- *Example*::
+ **Example**::
s = client.connect(...)
saved_searches = s.saved_searches
@@ -1437,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -1636,9 +1677,9 @@ def get(self, name="", owner=None, app=None, sharing=None, **query):
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
- Example:
+ **Example**::
- import splunklib.client
+ import splunklib.client
s = client.service(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
@@ -1865,7 +1906,7 @@ def delete(self, username, realm=None):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
@@ -2079,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2510,9 +2547,9 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
@@ -2730,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2741,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2771,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2934,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2995,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3150,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3569,7 +3603,7 @@ class KVStoreCollection(Entity):
def data(self):
"""Returns data object for this Collection.
- :rtype: :class:`KVStoreData`
+ :rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
@@ -3584,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3612,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3633,6 +3667,11 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
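# Illustrative usage (editor's sketch, not part of the diff): query() now
# JSON-encodes dict-valued arguments, so a MongoDB-style filter can be passed
# directly. Assumes `collection` is a KVStoreCollection whose documents have an
# "owner" field (placeholder).
rows = collection.data.query(query={"owner": "nobody"}, limit=5)
for row in rows:
    print(row["_key"])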
@@ -3645,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8'))
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3657,6 +3696,8 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
+ if isinstance(data, dict):
+ data = json.dumps(data)
return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
@@ -3679,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3693,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/data.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/data.py
index c29063d..f9ffb86 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/data.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/data.py
@@ -19,7 +19,7 @@
from __future__ import absolute_import
import sys
from xml.etree.ElementTree import XML
-from . import six
+from splunklib import six
__all__ = ["load"]
@@ -161,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/argument.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/argument.py
index 4c4b3c8..04214d1 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/argument.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/argument.py
@@ -54,9 +54,9 @@ def __init__(self, name, description=None, validation=None,
:param name: ``string``, identifier for this argument in Splunk.
:param description: ``string``, human-readable description of the argument.
:param validation: ``string`` specifying how the argument should be validated, if using internal validation.
- If using external validation, this will be ignored.
+ If using external validation, this will be ignored.
:param data_type: ``string``, data type of this field; use the class constants.
- "data_type_boolean", "data_type_number", or "data_type_string".
+ "data_type_boolean", "data_type_number", or "data_type_string".
:param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind.
:param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind.
:param title: ``String``, a human-readable title for the argument.
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event.py
index f840432..9cd6cf3 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event.py
@@ -13,6 +13,9 @@
# under the License.
from __future__ import absolute_import
+from io import TextIOBase
+from splunklib.six import ensure_text
+
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -104,5 +107,8 @@ def write_to(self, stream):
if self.done:
ET.SubElement(event, "done")
- stream.write(ET.tostring(event).decode())
+ if isinstance(stream, TextIOBase):
+ stream.write(ensure_text(ET.tostring(event)))
+ else:
+ stream.write(ET.tostring(event))
stream.flush()
\ No newline at end of file
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index d8a2a2e..5f8c5aa
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/event_writer.py
@@ -15,12 +15,16 @@
from __future__ import absolute_import
import sys
+from splunklib.six import ensure_str
from .event import ET
+try:
+ from splunklib.six.moves import cStringIO as StringIO
+except ImportError:
+ from splunklib.six import StringIO
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
-
Its two important methods are ``writeEvent``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
@@ -64,7 +68,7 @@ def log(self, severity, message):
:param message: ``string``, message to log.
"""
- self._err.write(("%s %s\n" % (severity, message)))
+ self._err.write("%s %s\n" % (severity, message))
self._err.flush()
def write_xml_document(self, document):
@@ -73,12 +77,11 @@ def write_xml_document(self, document):
:param document: An ``ElementTree`` object.
"""
- try:
- self._out.write(ET.tostring(document))
- except:
- self._out.write(ET.tostring(document, encoding="unicode"))
+ self._out.write(ensure_str(ET.tostring(document)))
self._out.flush()
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write("")
+ if self.header_written:
+ self._out.write("")
+ self._out.flush()
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/scheme.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/scheme.py
index ff4f978..4104e4a 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/scheme.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/scheme.py
@@ -55,7 +55,7 @@ def add_argument(self, arg):
def to_xml(self):
"""Creates an ``ET.Element`` representing self, then returns it.
- :returns root, an ``ET.Element`` representing this scheme.
+ :returns: an ``ET.Element`` representing this scheme.
"""
root = ET.Element("scheme")
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/script.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/script.py
index 86484ec..8595dc4 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/script.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/script.py
@@ -14,14 +14,14 @@
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
-from ..six.moves.urllib.parse import urlsplit
+from splunklib.six.moves.urllib.parse import urlsplit
import sys
from ..client import Service
from .event_writer import EventWriter
from .input_definition import InputDefinition
from .validation_definition import ValidationDefinition
-from .. import six
+from splunklib import six
try:
import xml.etree.cElementTree as ET
@@ -105,8 +105,7 @@ def run_script(self, args, event_writer, input_stream):
return 1
except Exception as e:
- err_string = EventWriter.ERROR + str(e)
- event_writer._err.write(err_string)
+ event_writer.log(EventWriter.ERROR, str(e))
return 1
@property
@@ -118,9 +117,9 @@ def service(self):
available as soon as the :code:`Script.stream_events` method is
called.
- :return: :class:splunklib.client.Service. A value of None is returned,
- if you call this method before the :code:`Script.stream_events` method
- is called.
+ :return: :class:`splunklib.client.Service`. A value of None is returned,
+ if you call this method before the :code:`Script.stream_events` method
+ is called.
"""
if self._service is not None:
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/utils.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/utils.py
index 47488dc..3d42b63 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/utils.py
@@ -15,7 +15,7 @@
# File for utility functions
from __future__ import absolute_import
-from ..six.moves import zip
+from splunklib.six.moves import zip
def xml_compare(expected, found):
"""Checks equality of two ``ElementTree`` objects.
@@ -64,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
+ data[child_name] = parse_parameters(child)
return data
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/validation_definition.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/validation_definition.py
index 8904e40..3bbe976 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/validation_definition.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/modularinput/validation_definition.py
@@ -28,7 +28,7 @@ class ValidationDefinition(object):
**Example**::
- ``v = ValidationDefinition()``
+ v = ValidationDefinition()
"""
def __init__(self):
@@ -46,23 +46,25 @@ def parse(stream):
The XML typically will look like this:
- ``<items>``
- ``  <server_host>myHost</server_host>``
- ``  <server_uri>https://127.0.0.1:8089</server_uri>``
- ``  <session_key>123102983109283019283</session_key>``
- ``  <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>``
- ``  <item name="myScheme">``
- ``    <param name="param1">value1</param>``
- ``    <param_list name="param2">``
- ``      <value>value2</value>``
- ``      <value>value3</value>``
- ``      <value>value4</value>``
- ``    </param_list>``
- ``  </item>``
- ``</items>``
+ .. code-block:: xml
+
+     <items>
+       <server_host>myHost</server_host>
+       <server_uri>https://127.0.0.1:8089</server_uri>
+       <session_key>123102983109283019283</session_key>
+       <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
+       <item name="myScheme">
+         <param name="param1">value1</param>
+         <param_list name="param2">
+           <value>value2</value>
+           <value>value3</value>
+           <value>value4</value>
+         </param_list>
+       </item>
+     </items>
:param stream: ``Stream`` containing XML to parse.
- :return definition: A ``ValidationDefinition`` object.
+ :return: A ``ValidationDefinition`` object.
"""
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/results.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/results.py
index 2a03a4f..8543ab0 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/results.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -34,29 +34,32 @@
from __future__ import absolute_import
-from io import BytesIO
+from io import BufferedReader, BytesIO
+
+from splunklib import six
+
+from splunklib.six import deprecated
-from . import six
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
- from .six.moves import cStringIO as StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except:
- from .six import StringIO
+ from splunklib.six import StringIO
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -67,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -80,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -92,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -110,6 +116,7 @@ def read(self, n=None):
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -123,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("")
assert s.read() == ""
"""
+
def __init__(self, stream):
self.stream = stream
@@ -153,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjuction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -180,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -260,16 +271,16 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, six.string_types) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
values.append(text)
@@ -291,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+ __next__ = next
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
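# Illustrative usage (editor's sketch, not part of the diff): JSONResultsReader
# replaces the deprecated ResultsReader and expects the search to be run with
# output_mode='json'. Assumes `service` is a connected splunklib.client.Service.
import splunklib.results as results

stream = service.jobs.oneshot("search index=_internal | head 5", output_mode="json")
for item in results.JSONResultsReader(stream):
    if isinstance(item, results.Message):
        print("Message:", item)   # diagnostic messages interleaved with results
    else:
        print("Result:", item)    # each result is a dict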
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/__init__.py
index 12b14f3..8a92903 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/__init__.py
@@ -30,7 +30,7 @@
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
It does not show that :code:`field-name` values may be comma-separated. This is because Splunk strips commas from
- the command line. A search command will never see them.
+ the command line. A search command will never see them.
2. Search commands targeting versions of Splunk prior to 6.3 must be statically configured as follows:
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `_
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure search assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/decorators.py
index 5ef92f7..d8b3f48 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/decorators.py
@@ -15,15 +15,12 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
-from .. import six
+from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
-from ..six.moves import map as imap
+from splunklib.six.moves import map as imap
from .internals import ConfigurationSettingsType, json_encode_string
from .validators import OptionName
@@ -36,7 +33,7 @@ class Configuration(object):
variable to search command classes that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive :code:`name` the word "Command" is removed
from the end of the class name and then converted to lower case for conformance with the `Search command style guide
- `_
+ `__
"""
def __init__(self, o=None, **kwargs):
@@ -229,8 +226,9 @@ class Option(property):
Short form (recommended). When you are satisfied with built-in or custom validation behaviors.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands.decorators import Option
from splunklib.searchcommands.validators import Fieldname
@@ -247,8 +245,9 @@ class Option(property):
also provide a deleter. You must be prepared to accept a value of :const:`None` which indicates that your
:code:`Option` is unset.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands import Option
@Option()
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/environment.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/environment.py
index 6773e39..e92018f 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/environment.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/environment.py
@@ -19,7 +19,7 @@
from logging import getLogger, root, StreamHandler
from logging.config import fileConfig
from os import chdir, environ, path
-from ..six.moves import getcwd
+from splunklib.six.moves import getcwd
import sys
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/eventing_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/eventing_command.py
index 147c871..27dc13a 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/eventing_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/eventing_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from ..six.moves import map as imap
+from splunklib import six
+from splunklib.six.moves import map as imap
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -135,8 +136,14 @@ def fix_up(cls, command):
raise AttributeError('No EventingCommand.transform override')
SearchCommand.ConfigurationSettings.fix_up(command)
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
return imap(lambda name_value: (name_value[0], 'events' if name_value[0] == 'type' else name_value[1]), iteritems)
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/external_search_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/external_search_command.py
index 989c4aa..c230624 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/external_search_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/external_search_command.py
@@ -20,7 +20,7 @@
import os
import sys
import traceback
-from .. import six
+from splunklib import six
if sys.platform == 'win32':
from signal import signal, CTRL_BREAK_EVENT, SIGBREAK, SIGINT, SIGTERM
@@ -105,13 +105,13 @@ def _execute(path, argv=None, environ=None):
:param argv: Argument list.
:type argv: list or tuple
- The arguments to the child process should start with the name of the command being run, but this is not
- enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
+ The arguments to the child process should start with the name of the command being run, but this is not
+ enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
:param environ: A mapping which is used to define the environment variables for the new process.
:type environ: dict or None.
- This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
- the :data:`os.environ` mapping should be used.
+ This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
+ the :data:`os.environ` mapping should be used.
:return: None
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/generating_command.py
index 2f97300..6a75d2c 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/generating_command.py
@@ -15,11 +15,13 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
-from ..six.moves import map as imap, filter as ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
# P1 [O] TODO: Discuss generates_timeorder in the class-level documentation for GeneratingCommand
@@ -92,9 +94,10 @@ class StreamingGeneratingCommand(GeneratingCommand)
+==========+===================================================+===================================================+
| streams | 1. Add this line to your command's stanza in | 1. Add this configuration setting to your code: |
| | | |
- | | default/commands.conf. | .. code-block:: python |
- | | .. code-block:: python | @Configuration(distributed=True) |
- | | local = false | class SomeCommand(GeneratingCommand) |
+ | | default/commands.conf:: | .. code-block:: python |
+ | | | |
+ | | local = false | @Configuration(distributed=True) |
+ | | | class SomeCommand(GeneratingCommand) |
| | | ... |
| | 2. Restart splunk | |
| | | 2. You are good to go; no need to restart Splunk |
@@ -112,6 +115,7 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration( | @Configuration(type='events') |
| | retainsevents=True, streaming=False) | class SomeCommand(GeneratingCommand) |
| | class SomeCommand(GeneratingCommand) | ... |
@@ -119,22 +123,25 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = true | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = true | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='events', retainsevents=True, streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: python
+
+ retainsevents = false
streaming = false
Reporting Generating command
@@ -149,28 +156,32 @@ class SomeCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration(retainsevents=False) | @Configuration(type='reporting') |
| | class SomeCommand(GeneratingCommand) | class SomeCommand(GeneratingCommand) |
| | ... | ... |
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = false | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = false | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='reporting', streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: text
+
+ retainsevents = false
streaming = false
"""
@@ -194,19 +205,57 @@ def _execute(self, ifile, process):
"""
if self._protocol_version == 2:
- result = self._read_chunk(ifile)
+ self._execute_v2(ifile, self.generate())
+ else:
+ assert self._protocol_version == 1
+ self._record_writer.write_records(self.generate())
+ self.finish()
- if not result:
- return
+ def _execute_chunk_v2(self, process, chunk):
+ count = 0
+ records = []
+ for row in process:
+ records.append(row)
+ count += 1
+ if count == self._record_writer._maxresultrows:
+ break
- metadata, body = result
- action = getattr(metadata, 'action', None)
+ for row in records:
+ self._record_writer.write_record(row)
- if action != 'execute':
- raise RuntimeError('Expected execute action, not {}'.format(action))
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
- self._record_writer.write_records(self.generate())
- self.finish()
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands, this must be True; passing False raises a ValueError.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command in a search.
+ # This class also implements its own _execute_chunk_v2 method, which does not respect allow_empty_input,
+ # so ensure that allow_empty_input is always True.
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
# endregion
@@ -315,6 +364,8 @@ def fix_up(cls, command):
if command.generate == GeneratingCommand.generate:
raise AttributeError('No GeneratingCommand.generate override')
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -325,6 +376,10 @@ def iteritems(self):
lambda name_value: (name_value[0], 'stateful') if name_value[0] == 'type' else (name_value[0], name_value[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass
# endregion
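
For reference, a minimal sketch of a generating command against the patched API (ExampleGeneratingCommand and its option are hypothetical names, not part of this diff): under SCP v2 the new _execute_v2/_execute_chunk_v2 path emits output in chunks of at most maxresultrows, and process() now forces allow_empty_input=True.

    #!/usr/bin/env python
    import sys
    import time

    from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators

    @Configuration(type='events', retainsevents=True, streaming=False)
    class ExampleGeneratingCommand(GeneratingCommand):
        count = Option(require=True, validate=validators.Integer(minimum=1))

        def generate(self):
            # Yield plain dicts; on SCP v2 the record writer chunks them per maxresultrows.
            for i in range(self.count):
                yield {'_time': time.time(), 'event_no': i, '_raw': 'event %d' % i}

    if __name__ == '__main__':
        dispatch(ExampleGeneratingCommand, sys.argv, sys.stdin, sys.stdout, __name__)
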
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/internals.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/internals.py
index 8b76fc2..1ea2833 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/internals.py
@@ -16,45 +16,63 @@
from __future__ import absolute_import, division, print_function
+from io import TextIOWrapper
from collections import deque, namedtuple
-from .. import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
-from ..six.moves import StringIO
+from splunklib import six
+from collections import OrderedDict
+from splunklib.six.moves import StringIO
from itertools import chain
-from ..six.moves import map as imap
+from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
-from ..six.moves import urllib
+from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
+import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
-if sys.platform == 'win32':
- # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
- # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
- # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
- from platform import python_implementation
- implementation = python_implementation()
- fileno = sys.stdout.fileno()
- if implementation == 'PyPy':
- sys.stdout = os.fdopen(fileno, 'wb', 0)
- else:
- from msvcrt import setmode
- setmode(fileno, os.O_BINARY)
+
+def set_binary_mode(fh):
+ """ Helper method to set up binary mode for file handles.
+ Typically used for sys.stdin, sys.stdout, and sys.stderr.
+ For Python 3, we want to return .buffer.
+ For Python 2 on Windows, we want to set os.O_BINARY.
+ """
+ typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
+ # check for file handle
+ if not isinstance(fh, typefile):
+ return fh
+
+ # check for python3 and buffer
+ if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
+ return fh.buffer
+ # check for python3
+ elif sys.version_info >= (3, 0):
+ pass
+ # check for windows python2. SPL-175233 -- python3 stdout is already binary
+ elif sys.platform == 'win32':
+ # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
+ # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
+ # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
+ from platform import python_implementation
+ implementation = python_implementation()
+ if implementation == 'PyPy':
+ return os.fdopen(fh.fileno(), 'wb', 0)
+ else:
+ import msvcrt
+ msvcrt.setmode(fh.fileno(), os.O_BINARY)
+ return fh
class CommandLineParser(object):
- """ Parses the arguments to a search command.
+ r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
@@ -212,7 +230,7 @@ def replace(match):
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
- _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
+ _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"\\])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
@@ -339,6 +357,8 @@ class CsvDialect(csv.Dialect):
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
+ if sys.version_info >= (3, 0) and sys.platform == 'win32':
+ lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
@@ -346,6 +366,7 @@ class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
+
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
@@ -373,7 +394,8 @@ def read(self, ifile):
# continuation of the current item
value += urllib.parse.unquote(line)
- if name is not None: self[name] = value[:-1] if value[-1] == '\n' else value
+ if name is not None:
+ self[name] = value[:-1] if value[-1] == '\n' else value
Message = namedtuple('Message', ('type', 'text'))
@@ -470,7 +492,7 @@ class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
- self._ofile = ofile
+ self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
@@ -481,8 +503,9 @@ def __init__(self, ofile, maxresultrows=None):
self._inspector = OrderedDict()
self._chunk_count = 0
- self._record_count = 0
- self._total_record_count = 0
+ self._pending_record_count = 0
+ self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -498,7 +521,37 @@ def ofile(self):
@ofile.setter
def ofile(self, value):
- self._ofile = value
+ self._ofile = set_binary_mode(value)
+
+ @property
+ def pending_record_count(self):
+ return self._pending_record_count
+
+ @property
+ def _record_count(self):
+ warnings.warn(
+ "_record_count will be deprecated soon. Use pending_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.pending_record_count
+
+ @property
+ def committed_record_count(self):
+ return self._committed_record_count
+
+ @property
+ def _total_record_count(self):
+ warnings.warn(
+ "_total_record_count will be deprecated soon. Use committed_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.committed_record_count
+
+ def write(self, data):
+ bytes_type = bytes if sys.version_info >= (3, 0) else str
+ if not isinstance(data, bytes_type):
+ data = data.encode('utf-8')
+ self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
@@ -517,6 +570,7 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
@@ -525,8 +579,7 @@ def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
- self._record_count = 0
- self._flushed = False
+ self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
@@ -539,6 +592,7 @@ def _write_record(self, record):
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
@@ -580,7 +634,7 @@ def _write_record(self, record):
value = str(value.real)
elif value_t is six.text_type:
value = value
- elif value_t is int or value_t is int or value_t is float or value_t is complex:
+ elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
@@ -610,7 +664,7 @@ def _write_record(self, record):
values += (value, None)
continue
- if value_t is int or value_t is int or value_t is float or value_t is complex:
+ if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
@@ -621,9 +675,9 @@ def _write_record(self, record):
values += (repr(value), None)
self._writerow(values)
- self._record_count += 1
+ self._pending_record_count += 1
- if self._record_count >= self._maxresultrows:
+ if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
@@ -660,10 +714,9 @@ def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- if self._record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
+ if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
- write = self._ofile.write
if self._chunk_count == 0:
@@ -675,12 +728,12 @@ def flush(self, finished=None, partial=None):
message_level = RecordWriterV1._message_level.get
for level, text in messages:
- write(message_level(level, level))
- write('=')
- write(text)
- write('\r\n')
+ self.write(message_level(level, level))
+ self.write('=')
+ self.write(text)
+ self.write('\r\n')
- write('\r\n')
+ self.write('\r\n')
elif messages is not None:
@@ -698,10 +751,10 @@ def flush(self, finished=None, partial=None):
for level, text in messages:
print(level, text, file=stderr)
- write(self._buffer.getvalue())
- self._clear()
+ self.write(self._buffer.getvalue())
self._chunk_count += 1
- self._total_record_count += self._record_count
+ self._committed_record_count += self.pending_record_count
+ self._clear()
self._finished = finished is True
@@ -719,44 +772,43 @@ class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- inspector = self._inspector
-
- if self._flushed is False:
-
- self._total_record_count += self._record_count
- self._chunk_count += 1
-
- # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
- # ChunkedExternProcessor (See SPL-103525)
- #
- # We will need to replace the following block of code with this block:
- #
- # metadata = [
- # ('inspector', self._inspector if len(self._inspector) else None),
- # ('finished', finished),
- # ('partial', partial)]
- if len(inspector) == 0:
- inspector = None
-
- if partial is True:
- finished = False
-
- metadata = [item for item in (('inspector', inspector), ('finished', finished))]
- self._write_chunk(metadata, self._buffer.getvalue())
- self._clear()
+ if partial or not finished:
+ # Don't flush partial chunks, since the SCP v2 protocol does not
+ # provide a way to send partial chunks yet.
+ return
- elif finished is True:
- self._write_chunk((('finished', True),), '')
+ if not self.is_flushed:
+ self.write_chunk(finished=True)
- self._finished = finished is True
+ def write_chunk(self, finished=None):
+ inspector = self._inspector
+ self._committed_record_count += self.pending_record_count
+ self._chunk_count += 1
+
+ # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
+ # ChunkedExternProcessor (See SPL-103525)
+ #
+ # We will need to replace the following block of code with this block:
+ #
+ # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
+ #
+ # if partial is True:
+ # finished = False
+
+ if len(inspector) == 0:
+ inspector = None
+
+ metadata = [item for item in (('inspector', inspector), ('finished', finished))]
+ self._write_chunk(metadata, self._buffer.getvalue())
+ self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
- self._ofile.write('\n')
+ self.write('\n')
self._clear()
def write_metric(self, name, value):
@@ -764,26 +816,29 @@ def write_metric(self, name, value):
self._inspector['metric.' + name] = value
def _clear(self):
- RecordWriter._clear(self)
+ super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
+ if sys.version_info >= (3, 0):
+ metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
+ if sys.version_info >= (3, 0):
+ body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
- write = self._ofile.write
- write(start_line)
- write(metadata)
- write(body)
+ self.write(start_line)
+ self.write(metadata)
+ self.write(body)
self._ofile.flush()
- self._flushed = False
+ self._flushed = True
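
The new RecordWriter.write() normalizes text to UTF-8 bytes before writing, since set_binary_mode() may have swapped the output stream for a binary buffer. A standalone sketch of that behavior follows (write_utf8 is a hypothetical name used only for illustration):

    import io
    import sys

    def write_utf8(ofile, data):
        # Mirrors RecordWriter.write(): encode text to UTF-8 bytes so the
        # (now binary) output stream accepts either str or bytes input.
        bytes_type = bytes if sys.version_info >= (3, 0) else str
        if not isinstance(data, bytes_type):
            data = data.encode('utf-8')
        ofile.write(data)

    buf = io.BytesIO()
    write_utf8(buf, u'chunked 1.0,17,0\n')    # text gets encoded
    write_utf8(buf, b'{"finished":true}')     # bytes pass through unchanged
    assert buf.getvalue().startswith(b'chunked 1.0')
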
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/reporting_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/reporting_command.py
index b9fb2af..9470861 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/reporting_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/reporting_command.py
@@ -23,7 +23,7 @@
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
-from .. import six
+from splunklib import six
class ReportingCommand(SearchCommand):
@@ -253,7 +253,7 @@ def fix_up(cls, command):
cls._requires_preop = False
return
- f = vars(command)[b'map'] # Function backing the map method
+ f = vars(command)['map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
@@ -266,7 +266,7 @@ def fix_up(cls, command):
# Create new StreamingCommand.ConfigurationSettings class
- module = command.__module__ + b'.' + command.__name__ + b'.map'
+ module = command.__module__ + '.' + command.__name__ + '.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/search_command.py
index 965e894..dd11391 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/search_command.py
@@ -22,15 +22,12 @@
import io
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from copy import deepcopy
-from ..six.moves import StringIO
+from splunklib.six.moves import StringIO
from itertools import chain, islice
-from ..six.moves import filter as ifilter, map as imap, zip as izip
-from .. import six
+from splunklib.six.moves import filter as ifilter, map as imap, zip as izip
+from splunklib import six
if six.PY2:
from logging import _levelNames, getLevelName, getLogger
else:
@@ -41,8 +38,8 @@
# Used for recording, skip on python 2.6
pass
from time import time
-from ..six.moves.urllib.parse import unquote
-from ..six.moves.urllib.parse import urlsplit
+from splunklib.six.moves.urllib.parse import unquote
+from splunklib.six.moves.urllib.parse import urlsplit
from warnings import warn
from xml.etree import ElementTree
@@ -124,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -172,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
record = Option(doc='''
**Syntax: record=
@@ -256,7 +262,7 @@ def search_results_info(self):
invocation.
:return: Search results info or :const:`None`, if the search results info file associated with the command
- invocation is inaccessible.
+ invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
@@ -338,6 +344,7 @@ def service(self):
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
+
enableheader = true
requires_srinfo = true
@@ -345,8 +352,8 @@ def service(self):
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
- :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
- of :code:`None` is returned.
+ :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
+ of :code:`None` is returned.
"""
if self._service is not None:
@@ -397,7 +404,7 @@ def flush(self):
:return: :const:`None`
"""
- self._record_writer.flush(partial=True)
+ self._record_writer.flush(finished=False)
def prepare(self):
""" Prepare for execution.
@@ -412,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -424,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read.
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -633,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -655,7 +681,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Reading metadata')
- metadata, body = self._read_chunk(ifile)
+ metadata, body = self._read_chunk(self._as_binary_stream(ifile))
action = getattr(metadata, 'action', None)
@@ -703,7 +729,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
self.fieldnames.append(str(result[0]))
else:
@@ -775,7 +801,6 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
- self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
@@ -809,15 +834,15 @@ def write_metric(self, name, value):
:param name: Name of the metric.
:type name: basestring
- :param value: A 4-tuple containing the value of metric :param:`name` where
+ :param value: A 4-tuple containing the value of metric ``name`` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
:return: :const:`None`.
@@ -832,6 +857,8 @@ def _decode_list(mv):
_encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
+ # Note: Subclasses must override this method so that it can be
+ # called as self._execute(ifile, None)
def _execute(self, ifile, process):
""" Default processing loop
@@ -845,21 +872,38 @@ def _execute(self, ifile, process):
:rtype: NoneType
"""
- self._record_writer.write_records(process(self._records(ifile)))
- self.finish()
+ if self.protocol_version == 1:
+ self._record_writer.write_records(process(self._records(ifile)))
+ self.finish()
+ else:
+ assert self._protocol_version == 2
+ self._execute_v2(ifile, process)
+
+ @staticmethod
+ def _as_binary_stream(ifile):
+ naught = ifile.read(0)
+ if isinstance(naught, bytes):
+ return ifile
+
+ try:
+ return ifile.buffer
+ except AttributeError as error:
+ raise RuntimeError('Failed to get underlying buffer: {}'.format(error))
@staticmethod
- def _read_chunk(ifile):
+ def _read_chunk(istream):
# noinspection PyBroadException
+ assert isinstance(istream.read(0), six.binary_type), 'Stream must be binary'
+
try:
- header = ifile.readline()
+ header = istream.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
- match = SearchCommand._header.match(header)
+ match = SearchCommand._header.match(six.ensure_str(header))
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
@@ -869,14 +913,14 @@ def _read_chunk(ifile):
body_length = int(body_length)
try:
- metadata = ifile.read(metadata_length)
+ metadata = istream.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
- metadata = decoder.decode(metadata)
+ metadata = decoder.decode(six.ensure_str(metadata))
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
@@ -886,16 +930,18 @@ def _read_chunk(ifile):
body = ""
try:
if body_length > 0:
- body = ifile.read(body_length)
+ body = istream.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
- return metadata, body
+ return metadata, six.ensure_str(body)
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
def _records_protocol_v1(self, ifile):
+ return self._read_csv_records(ifile)
+ def _read_csv_records(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
@@ -920,51 +966,37 @@ def _records_protocol_v1(self, ifile):
record[fieldname] = value
yield record
- def _records_protocol_v2(self, ifile):
+ def _execute_v2(self, ifile, process):
+ istream = self._as_binary_stream(ifile)
while True:
- result = self._read_chunk(ifile)
+ result = self._read_chunk(istream)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
-
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
- finished = getattr(metadata, 'finished', False)
+ self._finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
- if len(body) > 0:
- reader = csv.reader(StringIO(body), dialect=CsvDialect)
+ self._execute_chunk_v2(process, result)
- try:
- fieldnames = next(reader)
- except StopIteration:
- return
+ self._record_writer.write_chunk(finished=self._finished)
- mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
+ def _execute_chunk_v2(self, process, chunk):
+ metadata, body = chunk
- if len(mv_fieldnames) == 0:
- for values in reader:
- yield OrderedDict(izip(fieldnames, values))
- else:
- for values in reader:
- record = OrderedDict()
- for fieldname, value in izip(fieldnames, values):
- if fieldname.startswith('__mv_'):
- if len(value) > 0:
- record[mv_fieldnames[fieldname]] = self._decode_list(value)
- elif fieldname not in record:
- record[fieldname] = value
- yield record
-
- if finished:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
- self.flush()
+ records = self._read_csv_records(StringIO(body))
+ self._record_writer.write_records(process(records))
def _report_unexpected_error(self):
@@ -1035,6 +1067,8 @@ def fix_up(cls, command_class):
"""
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
@@ -1043,7 +1077,9 @@ def iteritems(self):
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
- items = iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
pass # endregion
@@ -1053,7 +1089,7 @@ def iteritems(self):
SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza `_ based on the value of
@@ -1076,11 +1112,13 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read.
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
#!/usr/bin/env python
@@ -1096,7 +1134,7 @@ def stream(records):
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@@ -1113,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
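
To illustrate the new allow_empty_input keyword threaded from dispatch() through process() into _execute_chunk_v2(), here is a hedged sketch (NoopCommand is a made-up example, not part of this diff): with allow_empty_input=False, an empty SCP v2 chunk body raises ValueError instead of being processed silently.

    import sys

    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class NoopCommand(StreamingCommand):
        def stream(self, records):
            for record in records:
                yield record

    if __name__ == '__main__':
        # allow_empty_input=False makes _execute_chunk_v2 raise ValueError
        # when an empty chunk body is read.
        dispatch(NoopCommand, sys.argv, sys.stdin, sys.stdout, __name__, allow_empty_input=False)
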
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/streaming_command.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/streaming_command.py
index cf5c0f4..fa075ed 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/streaming_command.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/streaming_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from ..six.moves import map as imap, filter as ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -172,6 +173,8 @@ def fix_up(cls, command):
raise AttributeError('No StreamingCommand.stream override')
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -185,4 +188,8 @@ def iteritems(self):
lambda name_value1: (name_value1[0], 'stateful') if name_value1[0] == 'type' else (name_value1[0], name_value1[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/validators.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/validators.py
index b5fddc7..22f0e16 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/searchcommands/validators.py
@@ -18,13 +18,13 @@
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
-from ..six.moves import StringIO
+from splunklib.six.moves import StringIO
from io import open
import csv
import os
import re
-from .. import six
-from ..six.moves import getcwd
+from splunklib import six
+from splunklib.six.moves import getcwd
class Validator(object):
@@ -81,9 +81,9 @@ class Code(Validator):
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
- sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
- consists of a single interactive statement. In the latter case, expression statements that evaluate to
- something other than :const:`None` will be printed.
+ sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
+ consists of a single interactive statement. In the latter case, expression statements that evaluate to
+ something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
@@ -95,7 +95,9 @@ def __call__(self, value):
try:
return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- raise ValueError(error.message)
+ message = str(error)
+
+ six.raise_from(ValueError(message), error)
def format(self, value):
return None if value is None else value.source
@@ -199,6 +201,48 @@ def format(self, value):
return None if value is None else six.text_type(int(value))
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
+
+
class Duration(Validator):
""" Validates duration option values.
@@ -249,10 +293,10 @@ class List(Validator):
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
- delimiter = b','
- quotechar = b'"'
+ delimiter = str(',')
+ quotechar = str('"')
doublequote = True
- lineterminator = b'\n'
+ lineterminator = str('\n')
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
@@ -386,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
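
The new Float validator is used like the existing Integer validator; a small sketch follows (ScaleCommand and its option names are illustrative only):

    from splunklib.searchcommands import Configuration, Option, StreamingCommand, validators

    @Configuration()
    class ScaleCommand(StreamingCommand):
        # Float accepts optional minimum/maximum bounds, mirroring Integer.
        factor = Option(require=True, validate=validators.Float(minimum=0.0, maximum=100.0))

        def stream(self, records):
            for record in records:
                record['scaled'] = float(record.get('value', 0)) * self.factor
                yield record
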
diff --git a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/six.py b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/six.py
index 190c023..d13e50c 100644
--- a/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/six.py
+++ b/bin/ta_dmarc/aob_py2/solnlib/packages/splunklib/six.py
@@ -1,6 +1,4 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
+# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -20,6 +18,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+"""Utilities for writing code that runs on Python 2 and 3"""
+
from __future__ import absolute_import
import functools
@@ -29,7 +29,7 @@
import types
__author__ = "Benjamin Peterson "
-__version__ = "1.10.0"
+__version__ = "1.14.0"
# Useful for very coarse version differentiation.
@@ -241,6 +241,7 @@ class _MovedItems(_LazyModule):
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
@@ -254,18 +255,21 @@ class _MovedItems(_LazyModule):
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
@@ -337,10 +341,12 @@ class Module_six_moves_urllib_parse(_LazyModule):
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
@@ -416,6 +422,8 @@ class Module_six_moves_urllib_request(_LazyModule):
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
@@ -631,13 +639,16 @@ def u(s):
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
+ del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
@@ -659,6 +670,7 @@ def indexbytes(buf, i):
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
@@ -675,15 +687,23 @@ def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
@@ -699,19 +719,19 @@ def exec_(_code_, _globs_=None, _locs_=None):
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
""")
-if sys.version_info[:2] == (3, 2):
+if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
+ try:
+ raise value from from_value
+ finally:
+ value = None
""")
else:
def raise_from(value, from_value):
@@ -786,13 +806,33 @@ def print_(*args, **kwargs):
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
else:
wraps = functools.wraps
@@ -802,10 +842,22 @@ def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
- class metaclass(meta):
+ class metaclass(type):
def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
@@ -821,13 +873,73 @@ def wrapper(cls):
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
def python_2_unicode_compatible(klass):
"""
- A decorator that defines __unicode__ and __str__ methods under Python 2.
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
@@ -866,3 +978,16 @@ def python_2_unicode_compatible(klass):
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
+
+import warnings
+
+def deprecated(message):
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
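
The bundled six now ships ensure_binary/ensure_str/ensure_text, which the chunked-protocol reader above relies on. A quick sketch of their round-trip behavior, assuming the top-level splunklib package is importable:

    from splunklib import six

    header = six.ensure_binary(u'chunked 1.0,42,0\n')   # bytes on Python 2 and 3
    text = six.ensure_str(header)                        # native str on either major
    assert text.startswith('chunked 1.0')
    assert six.ensure_text(header) == u'chunked 1.0,42,0\n'
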
diff --git a/bin/ta_dmarc/aob_py2/splunklib/__init__.py b/bin/ta_dmarc/aob_py2/splunklib/__init__.py
index 59daf9e..1f9fc68 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/__init__.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/__init__.py
@@ -16,5 +16,20 @@
from __future__ import absolute_import
from splunklib.six.moves import map
-__version_info__ = (1, 6, 6)
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
+
+__version_info__ = (1, 6, 20)
__version__ = ".".join(map(str, __version_info__))
diff --git a/bin/ta_dmarc/aob_py2/splunklib/binding.py b/bin/ta_dmarc/aob_py2/splunklib/binding.py
index 3fe7c84..bb2771d 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/binding.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/binding.py
@@ -31,6 +31,7 @@
import socket
import ssl
import sys
+import time
from base64 import b64encode
from contextlib import contextmanager
from datetime import datetime
@@ -39,7 +40,6 @@
from xml.etree.ElementTree import XML
from splunklib import six
-from splunklib.six import StringIO
from splunklib.six.moves import urllib
from .data import record
@@ -49,6 +49,7 @@
except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -70,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -80,6 +81,7 @@ def _parse_cookies(cookie_str, dictionary):
then updates the dictionary with any key-value pairs found.
**Example**::
+
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
@@ -295,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -449,8 +450,16 @@ class Context(object):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param splunkToken: Splunk authentication token
+ :type splunkToken: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
@@ -468,7 +477,8 @@ class Context(object):
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
- cert_file=kwargs.get("cert_file")) # Default to False for backward compat
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
@@ -480,6 +490,7 @@ def __init__(self, handler=None, **kwargs):
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.basic = kwargs.get("basic", False)
+ self.bearerToken = kwargs.get("splunkToken", "")
self.autologin = kwargs.get("autologin", False)
self.additional_headers = kwargs.get("headers", [])
@@ -496,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+ """Returns true if the ``HttpLib`` member of this instance has auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+ :return: ``True`` if there is an auth token present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -520,6 +531,9 @@ def _auth_headers(self):
elif self.basic and (self.username and self.password):
token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii')
return [("Authorization", token)]
+ elif self.bearerToken:
+ token = 'Bearer %s' % self.bearerToken
+ return [("Authorization", token)]
elif self.token is _NoAuthenticationToken:
return []
else:
@@ -611,7 +625,7 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@@ -674,7 +688,7 @@ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.get(path, all_headers, **query)
return response
@@ -717,7 +731,12 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
- :type query: ``string``
+ :param body: Parameters to be used in the post body. If specified,
+ any parameters in the query will be applied to the URL instead of
+ the body. If a dict is supplied, the key-value pairs will be form
+ encoded. If a string is supplied, the body will be passed through
+ in the request unchanged.
+ :type body: ``dict`` or ``str``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -747,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -783,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -814,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
+
all_headers = headers + self.additional_headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
@@ -862,6 +899,10 @@ def login(self):
# as credentials were passed in.
return
+ if self.bearerToken:
+ # Bearer auth mode requested, so this method is a nop as long
+ # as authentication token was passed in.
+ return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
@@ -1054,7 +1095,7 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
@@ -1121,12 +1162,14 @@ class HttpLib(object):
If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None):
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
if custom_handler is None:
- self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file)
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
else:
self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1212,6 +1255,8 @@ def post(self, url, headers=None, **kwargs):
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
body = kwargs.pop('body')
+ if isinstance(body, dict):
+ body = _encode(**body).encode('utf-8')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
@@ -1238,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1274,7 +1328,10 @@ def __init__(self, response, connection=None):
self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
@@ -1333,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None, verify=False):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1345,6 +1402,8 @@ def handler(key_file=None, cert_file=None, timeout=None, verify=False):
:type timeout: ``integer`` or "None"
:param `verify`: Set to False to disable SSL verification on https connections.
:type verify: ``Boolean``
+ :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True and a context is specified
+ :type context: ``SSLContext``
"""
def connect(scheme, host, port):
@@ -1356,9 +1415,12 @@ def connect(scheme, host, port):
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
- # If running Python 2.7.9+, disable SSL certificate validation
- if (sys.version_info >= (2,7,9) and key_file is None and cert_file is None) and not verify:
+ if not verify:
kwargs['context'] = ssl._create_unverified_context()
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
@@ -1368,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.6",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
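The binding changes above add connection retries and SSL context passthrough. A minimal sketch of how a caller might exercise them via client.connect (the host, credentials, and CA file path are hypothetical, not part of this patch):

    import ssl
    import splunklib.client as client

    # Build an SSLContext to hand through to HTTPSConnection when verify=True.
    ssl_context = ssl.create_default_context(cafile="/path/to/ca.pem")

    service = client.connect(
        host="splunk.example.com",
        username="admin",
        password="changeme",
        verify=True,
        context=ssl_context,
        retries=3,        # retry a failed HTTP call up to 3 times
        retryDelay=5)     # wait 5 seconds between attempts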
diff --git a/bin/ta_dmarc/aob_py2/splunklib/client.py b/bin/ta_dmarc/aob_py2/splunklib/client.py
index 1e624ba..35d9e4f 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/client.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/client.py
@@ -75,6 +75,8 @@
namespace)
from .data import record
+logger = logging.getLogger(__name__)
+
__all__ = [
"connect",
"NotSupportedError",
@@ -224,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
@@ -295,7 +300,7 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
@@ -318,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -365,7 +377,7 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
@@ -384,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -401,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -463,6 +481,13 @@ def info(self):
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
+ def input(self, path, kind=None):
+ """Retrieves an input by path, and optionally kind.
+
+ :return: A :class:`Input` object.
+ """
+ return Input(self, path, kind=kind).refresh()
+
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
@@ -666,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+ By default, if the kvstore owner is not set, it will return "nobody".
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ The kvstore is refreshed when the owner value is changed.
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+ Sets the owner for the namespace before retrieving the KVStore Collection.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
@@ -692,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -750,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -810,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -821,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -946,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1052,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1200,7 +1241,7 @@ def __getitem__(self, key):
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
- *Example*::
+ **Example**::
s = client.connect(...)
saved_searches = s.saved_searches
@@ -1437,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -1636,9 +1677,9 @@ def get(self, name="", owner=None, app=None, sharing=None, **query):
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
- Example:
+ **Example**::
- import splunklib.client
+ import splunklib.client
s = client.service(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
@@ -1865,7 +1906,7 @@ def delete(self, username, realm=None):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
@@ -2079,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2510,9 +2547,9 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
@@ -2730,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2741,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2771,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2934,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2995,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3150,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3569,7 +3603,7 @@ class KVStoreCollection(Entity):
def data(self):
"""Returns data object for this Collection.
- :rtype: :class:`KVStoreData`
+ :rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
@@ -3584,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3612,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3633,6 +3667,11 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
@@ -3645,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8'))
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3657,6 +3696,8 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
+ if isinstance(data, dict):
+ data = json.dumps(data)
return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
@@ -3679,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3693,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
diff --git a/bin/ta_dmarc/aob_py2/splunklib/data.py b/bin/ta_dmarc/aob_py2/splunklib/data.py
index dedbb33..f9ffb86 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/data.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/data.py
@@ -161,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/argument.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/argument.py
index 4c4b3c8..04214d1 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/argument.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/argument.py
@@ -54,9 +54,9 @@ def __init__(self, name, description=None, validation=None,
:param name: ``string``, identifier for this argument in Splunk.
:param description: ``string``, human-readable description of the argument.
:param validation: ``string`` specifying how the argument should be validated, if using internal validation.
- If using external validation, this will be ignored.
+ If using external validation, this will be ignored.
:param data_type: ``string``, data type of this field; use the class constants.
- "data_type_boolean", "data_type_number", or "data_type_string".
+ "data_type_boolean", "data_type_number", or "data_type_string".
:param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind.
:param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind.
:param title: ``String``, a human-readable title for the argument.
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/event.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/event.py
index fdf19fa..9cd6cf3 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/event.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/event.py
@@ -13,6 +13,9 @@
# under the License.
from __future__ import absolute_import
+from io import TextIOBase
+from splunklib.six import ensure_text
+
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -104,5 +107,8 @@ def write_to(self, stream):
if self.done:
ET.SubElement(event, "done")
- stream.write(ET.tostring(event))
+ if isinstance(stream, TextIOBase):
+ stream.write(ensure_text(ET.tostring(event)))
+ else:
+ stream.write(ET.tostring(event))
stream.flush()
\ No newline at end of file
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index fb96c91..5f8c5aa
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/event_writer.py
@@ -15,6 +15,7 @@
from __future__ import absolute_import
import sys
+from splunklib.six import ensure_str
from .event import ET
try:
@@ -24,7 +25,6 @@
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
-
Its two important methods are ``writeEvent``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
@@ -55,7 +55,7 @@ def write_event(self, event):
"""
if not self.header_written:
- self._out.write(b"<stream>")
+ self._out.write("<stream>")
self.header_written = True
event.write_to(self._out)
@@ -68,7 +68,7 @@ def log(self, severity, message):
:param message: ``string``, message to log.
"""
- self._err.write(("%s %s\n" % (severity, message)).encode('utf-8'))
+ self._err.write("%s %s\n" % (severity, message))
self._err.flush()
def write_xml_document(self, document):
@@ -77,9 +77,11 @@ def write_xml_document(self, document):
:param document: An ``ElementTree`` object.
"""
- self._out.write(ET.tostring(document))
+ self._out.write(ensure_str(ET.tostring(document)))
self._out.flush()
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write(b"")
+ if self.header_written:
+ self._out.write("")
+ self._out.flush()
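With the event.py and event_writer.py changes above, writing to a text stream such as sys.stdout no longer requires byte strings. A minimal sketch (the event data and sourcetype are illustrative):

    import sys
    from splunklib.modularinput.event import Event
    from splunklib.modularinput.event_writer import EventWriter

    writer = EventWriter(output=sys.stdout, error=sys.stderr)
    event = Event(data="hello from a modular input", sourcetype="my:sourcetype")
    writer.write_event(event)   # writes the <stream> header once, then the event XML
    writer.close()              # writes </stream> only if the header was written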
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/scheme.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/scheme.py
index ff4f978..4104e4a 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/scheme.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/scheme.py
@@ -55,7 +55,7 @@ def add_argument(self, arg):
def to_xml(self):
"""Creates an ``ET.Element`` representing self, then returns it.
- :returns root, an ``ET.Element`` representing this scheme.
+ :returns: an ``ET.Element`` representing this scheme.
"""
root = ET.Element("scheme")
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/script.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/script.py
index 040a07d..8595dc4 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/script.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/script.py
@@ -105,8 +105,7 @@ def run_script(self, args, event_writer, input_stream):
return 1
except Exception as e:
- err_string = EventWriter.ERROR + str(e)
- event_writer._err.write(err_string)
+ event_writer.log(EventWriter.ERROR, str(e))
return 1
@property
@@ -118,9 +117,9 @@ def service(self):
available as soon as the :code:`Script.stream_events` method is
called.
- :return: :class:splunklib.client.Service. A value of None is returned,
- if you call this method before the :code:`Script.stream_events` method
- is called.
+ :return: :class:`splunklib.client.Service`. A value of None is returned,
+ if you call this method before the :code:`Script.stream_events` method
+ is called.
"""
if self._service is not None:
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/utils.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/utils.py
index 853694a..3d42b63 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/utils.py
@@ -64,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
+ data[child_name] = parse_parameters(child)
return data
diff --git a/bin/ta_dmarc/aob_py2/splunklib/modularinput/validation_definition.py b/bin/ta_dmarc/aob_py2/splunklib/modularinput/validation_definition.py
index 8904e40..3bbe976 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/modularinput/validation_definition.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/modularinput/validation_definition.py
@@ -28,7 +28,7 @@ class ValidationDefinition(object):
**Example**::
- ``v = ValidationDefinition()``
+ v = ValidationDefinition()
"""
def __init__(self):
@@ -46,23 +46,25 @@ def parse(stream):
The XML typically will look like this:
- ``<items>``
- ``  <server_host>myHost</server_host>``
- ``  <server_uri>https://127.0.0.1:8089</server_uri>``
- ``  <session_key>123102983109283019283</session_key>``
- ``  <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>``
- ``  <item name="myScheme">``
- ``    <param name="param1">value1</param>``
- ``    <param_list name="param2">``
- ``      <value>value2</value>``
- ``      <value>value3</value>``
- ``      <value>value4</value>``
- ``    </param_list>``
- ``  </item>``
- ``</items>``
+ .. code-block:: xml
+
+     <items>
+         <server_host>myHost</server_host>
+         <server_uri>https://127.0.0.1:8089</server_uri>
+         <session_key>123102983109283019283</session_key>
+         <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
+         <item name="myScheme">
+             <param name="param1">value1</param>
+             <param_list name="param2">
+                 <value>value2</value>
+                 <value>value3</value>
+                 <value>value4</value>
+             </param_list>
+         </item>
+     </items>
+
:param stream: ``Stream`` containing XML to parse.
- :return definition: A ``ValidationDefinition`` object.
+ :return: A ``ValidationDefinition`` object.
"""
diff --git a/bin/ta_dmarc/aob_py2/splunklib/results.py b/bin/ta_dmarc/aob_py2/splunklib/results.py
index 20501c5..8543ab0 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/results.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -34,18 +34,19 @@
from __future__ import absolute_import
-from io import BytesIO
+from io import BufferedReader, BytesIO
from splunklib import six
+
+from splunklib.six import deprecated
+
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
from splunklib.six.moves import cStringIO as StringIO
@@ -54,9 +55,11 @@
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -67,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -80,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -92,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -110,6 +116,7 @@ def read(self, n=None):
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -123,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("<?xml abcd><element><?xml ...></element>")
assert s.read() == "<element></element>"
"""
+
def __init__(self, stream):
self.stream = stream
@@ -153,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjuction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -180,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -260,16 +271,16 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, six.string_types) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
values.append(text)
@@ -291,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+ __next__ = next
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/__init__.py
index 12b14f3..8a92903 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/__init__.py
@@ -30,7 +30,7 @@
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
It does not show that :code:`field-name` values may be comma-separated. This is because Splunk strips commas from
- the command line. A search command will never see them.
+ the command line. A search command will never see them.
2. Search commands targeting versions of Splunk prior to 6.3 must be statically configured as follows:
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `_
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure search assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/decorators.py
index b5e1606..d8b3f48 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/decorators.py
@@ -17,10 +17,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
from splunklib.six.moves import map as imap
@@ -36,7 +33,7 @@ class Configuration(object):
variable to search command classes that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive :code:`name` the word "Command" is removed
from the end of the class name and then converted to lower case for conformance with the `Search command style guide
- `_
+ `__
"""
def __init__(self, o=None, **kwargs):
@@ -229,8 +226,9 @@ class Option(property):
Short form (recommended). When you are satisfied with built-in or custom validation behaviors.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands.decorators import Option
from splunklib.searchcommands.validators import Fieldname
@@ -247,8 +245,9 @@ class Option(property):
also provide a deleter. You must be prepared to accept a value of :const:`None` which indicates that your
:code:`Option` is unset.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands import Option
@Option()
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/eventing_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/eventing_command.py
index 1481cee..27dc13a 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/eventing_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/eventing_command.py
@@ -16,6 +16,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from splunklib import six
from splunklib.six.moves import map as imap
from .decorators import ConfigurationSetting
@@ -135,8 +136,14 @@ def fix_up(cls, command):
raise AttributeError('No EventingCommand.transform override')
SearchCommand.ConfigurationSettings.fix_up(command)
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
return imap(lambda name_value: (name_value[0], 'events' if name_value[0] == 'type' else name_value[1]), iteritems)
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/external_search_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/external_search_command.py
index 2c4ce50..c230624 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/external_search_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/external_search_command.py
@@ -105,13 +105,13 @@ def _execute(path, argv=None, environ=None):
:param argv: Argument list.
:type argv: list or tuple
- The arguments to the child process should start with the name of the command being run, but this is not
- enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
+ The arguments to the child process should start with the name of the command being run, but this is not
+ enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
:param environ: A mapping which is used to define the environment variables for the new process.
:type environ: dict or None.
- This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
- the :data:`os.environ` mapping should be used.
+ This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
+ the :data:`os.environ` mapping should be used.
:return: None
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/generating_command.py
index fd0585e..6a75d2c 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/generating_command.py
@@ -15,10 +15,12 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
+from splunklib import six
from splunklib.six.moves import map as imap, filter as ifilter
# P1 [O] TODO: Discuss generates_timeorder in the class-level documentation for GeneratingCommand
@@ -92,9 +94,10 @@ class StreamingGeneratingCommand(GeneratingCommand)
+==========+===================================================+===================================================+
| streams | 1. Add this line to your command's stanza in | 1. Add this configuration setting to your code: |
| | | |
- | | default/commands.conf. | .. code-block:: python |
- | | .. code-block:: python | @Configuration(distributed=True) |
- | | local = false | class SomeCommand(GeneratingCommand) |
+ | | default/commands.conf:: | .. code-block:: python |
+ | | | |
+ | | local = false | @Configuration(distributed=True) |
+ | | | class SomeCommand(GeneratingCommand) |
| | | ... |
| | 2. Restart splunk | |
| | | 2. You are good to go; no need to restart Splunk |
@@ -112,6 +115,7 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration( | @Configuration(type='events') |
| | retainsevents=True, streaming=False) | class SomeCommand(GeneratingCommand) |
| | class SomeCommand(GeneratingCommand) | ... |
@@ -119,22 +123,25 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = true | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = true | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='events', retainsevents=True, streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: python
+
+ retainsevents = false
streaming = false
Reporting Generating command
@@ -149,28 +156,32 @@ class SomeCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration(retainsevents=False) | @Configuration(type='reporting') |
| | class SomeCommand(GeneratingCommand) | class SomeCommand(GeneratingCommand) |
| | ... | ... |
| | | |
| | Or add this lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = false | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = false | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='reporting', streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: text
+
+ retainsevents = false
streaming = false
"""
@@ -194,19 +205,57 @@ def _execute(self, ifile, process):
"""
if self._protocol_version == 2:
- result = self._read_chunk(ifile)
+ self._execute_v2(ifile, self.generate())
+ else:
+ assert self._protocol_version == 1
+ self._record_writer.write_records(self.generate())
+ self.finish()
- if not result:
- return
+ def _execute_chunk_v2(self, process, chunk):
+ count = 0
+ records = []
+ for row in process:
+ records.append(row)
+ count += 1
+ if count == self._record_writer._maxresultrows:
+ break
- metadata, body = result
- action = getattr(metadata, 'action', None)
+ for row in records:
+ self._record_writer.write_record(row)
- if action != 'execute':
- raise RuntimeError('Expected execute action, not {}'.format(action))
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
- self._record_writer.write_records(self.generate())
- self.finish()
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands this must be True; passing False raises a ValueError.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command being run in a search,
+ # also this class implements its own separate _execute_chunk_v2 method which does not respect allow_empty_input
+ # so ensure that allow_empty_input is always True
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
# endregion
@@ -315,6 +364,8 @@ def fix_up(cls, command):
if command.generate == GeneratingCommand.generate:
raise AttributeError('No GeneratingCommand.generate override')
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -325,6 +376,10 @@ def iteritems(self):
lambda name_value: (name_value[0], 'stateful') if name_value[0] == 'type' else (name_value[0], name_value[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass
# endregion
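The generating_command.py changes above split protocol v2 execution into _execute_chunk_v2 and pin allow_empty_input to True. A minimal generating command under the updated process() signature might look like this (the command name and yielded fields are hypothetical):

    #!/usr/bin/env python
    import sys
    import time
    from splunklib.searchcommands import dispatch, Configuration, GeneratingCommand

    @Configuration()
    class HelloCommand(GeneratingCommand):
        def generate(self):
            # Generating commands start from an empty input set, so
            # process() keeps allow_empty_input=True.
            for i in range(3):
                yield {'_time': time.time(), 'event_no': i, '_raw': 'hello %d' % i}

    dispatch(HelloCommand, sys.argv, sys.stdin, sys.stdout, __name__)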
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/internals.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/internals.py
index 02634c0..1ea2833 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/internals.py
@@ -16,12 +16,10 @@
from __future__ import absolute_import, division, print_function
+from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
@@ -34,27 +32,47 @@
import os
import re
import sys
+import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
-if sys.platform == 'win32':
- # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
- # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
- # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
- from platform import python_implementation
- implementation = python_implementation()
- fileno = sys.stdout.fileno()
- if implementation == 'PyPy':
- sys.stdout = os.fdopen(fileno, 'wb', 0)
- else:
- from msvcrt import setmode
- setmode(fileno, os.O_BINARY)
+
+def set_binary_mode(fh):
+ """ Helper method to set up binary mode for file handles.
+ Emphasis being sys.stdin, sys.stdout, sys.stderr.
+ For python3, we want to return .buffer
+ For python2+windows we want to set os.O_BINARY
+ """
+ typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
+ # check for file handle
+ if not isinstance(fh, typefile):
+ return fh
+
+ # check for python3 and buffer
+ if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
+ return fh.buffer
+ # check for python3
+ elif sys.version_info >= (3, 0):
+ pass
+ # check for windows python2. SPL-175233 -- python3 stdout is already binary
+ elif sys.platform == 'win32':
+ # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
+ # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
+ # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
+ from platform import python_implementation
+ implementation = python_implementation()
+ if implementation == 'PyPy':
+ return os.fdopen(fh.fileno(), 'wb', 0)
+ else:
+ import msvcrt
+ msvcrt.setmode(fh.fileno(), os.O_BINARY)
+ return fh
class CommandLineParser(object):
- """ Parses the arguments to a search command.
+ r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
@@ -212,7 +230,7 @@ def replace(match):
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
- _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
+ _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"\\])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
@@ -339,6 +357,8 @@ class CsvDialect(csv.Dialect):
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
+ if sys.version_info >= (3, 0) and sys.platform == 'win32':
+ lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
@@ -346,6 +366,7 @@ class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
+
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
@@ -373,7 +394,8 @@ def read(self, ifile):
# continuation of the current item
value += urllib.parse.unquote(line)
- if name is not None: self[name] = value[:-1] if value[-1] == '\n' else value
+ if name is not None:
+ self[name] = value[:-1] if value[-1] == '\n' else value
Message = namedtuple('Message', ('type', 'text'))
@@ -470,7 +492,7 @@ class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
- self._ofile = ofile
+ self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
@@ -481,8 +503,9 @@ def __init__(self, ofile, maxresultrows=None):
self._inspector = OrderedDict()
self._chunk_count = 0
- self._record_count = 0
- self._total_record_count = 0
+ self._pending_record_count = 0
+ self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -498,7 +521,37 @@ def ofile(self):
@ofile.setter
def ofile(self, value):
- self._ofile = value
+ self._ofile = set_binary_mode(value)
+
+ @property
+ def pending_record_count(self):
+ return self._pending_record_count
+
+ @property
+ def _record_count(self):
+ warnings.warn(
+ "_record_count will be deprecated soon. Use pending_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.pending_record_count
+
+ @property
+ def committed_record_count(self):
+ return self._committed_record_count
+
+ @property
+ def _total_record_count(self):
+ warnings.warn(
+ "_total_record_count will be deprecated soon. Use committed_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.committed_record_count
+
+ def write(self, data):
+ bytes_type = bytes if sys.version_info >= (3, 0) else str
+ if not isinstance(data, bytes_type):
+ data = data.encode('utf-8')
+ self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
@@ -517,6 +570,7 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
@@ -525,8 +579,7 @@ def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
- self._record_count = 0
- self._flushed = False
+ self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
@@ -539,6 +592,7 @@ def _write_record(self, record):
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
@@ -580,7 +634,7 @@ def _write_record(self, record):
value = str(value.real)
elif value_t is six.text_type:
value = value
- elif value_t is int or value_t is int or value_t is float or value_t is complex:
+ elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
@@ -610,7 +664,7 @@ def _write_record(self, record):
values += (value, None)
continue
- if value_t is int or value_t is int or value_t is float or value_t is complex:
+ if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
@@ -621,9 +675,9 @@ def _write_record(self, record):
values += (repr(value), None)
self._writerow(values)
- self._record_count += 1
+ self._pending_record_count += 1
- if self._record_count >= self._maxresultrows:
+ if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
@@ -660,10 +714,9 @@ def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- if self._record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
+ if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
- write = self._ofile.write
if self._chunk_count == 0:
@@ -675,12 +728,12 @@ def flush(self, finished=None, partial=None):
message_level = RecordWriterV1._message_level.get
for level, text in messages:
- write(message_level(level, level))
- write('=')
- write(text)
- write('\r\n')
+ self.write(message_level(level, level))
+ self.write('=')
+ self.write(text)
+ self.write('\r\n')
- write('\r\n')
+ self.write('\r\n')
elif messages is not None:
@@ -698,10 +751,10 @@ def flush(self, finished=None, partial=None):
for level, text in messages:
print(level, text, file=stderr)
- write(self._buffer.getvalue())
- self._clear()
+ self.write(self._buffer.getvalue())
self._chunk_count += 1
- self._total_record_count += self._record_count
+ self._committed_record_count += self.pending_record_count
+ self._clear()
self._finished = finished is True
@@ -719,44 +772,43 @@ class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- inspector = self._inspector
-
- if self._flushed is False:
-
- self._total_record_count += self._record_count
- self._chunk_count += 1
-
- # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
- # ChunkedExternProcessor (See SPL-103525)
- #
- # We will need to replace the following block of code with this block:
- #
- # metadata = [
- # ('inspector', self._inspector if len(self._inspector) else None),
- # ('finished', finished),
- # ('partial', partial)]
- if len(inspector) == 0:
- inspector = None
-
- if partial is True:
- finished = False
-
- metadata = [item for item in (('inspector', inspector), ('finished', finished))]
- self._write_chunk(metadata, self._buffer.getvalue())
- self._clear()
+ if partial or not finished:
+ # Don't flush partial chunks, since the SCP v2 protocol does not
+ # provide a way to send partial chunks yet.
+ return
- elif finished is True:
- self._write_chunk((('finished', True),), '')
+ if not self.is_flushed:
+ self.write_chunk(finished=True)
- self._finished = finished is True
+ def write_chunk(self, finished=None):
+ inspector = self._inspector
+ self._committed_record_count += self.pending_record_count
+ self._chunk_count += 1
+
+ # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
+ # ChunkedExternProcessor (See SPL-103525)
+ #
+ # We will need to replace the following block of code with this block:
+ #
+ # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
+ #
+ # if partial is True:
+ # finished = False
+
+ if len(inspector) == 0:
+ inspector = None
+
+ metadata = [item for item in (('inspector', inspector), ('finished', finished))]
+ self._write_chunk(metadata, self._buffer.getvalue())
+ self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
- self._ofile.write('\n')
+ self.write('\n')
self._clear()
def write_metric(self, name, value):
@@ -764,26 +816,29 @@ def write_metric(self, name, value):
self._inspector['metric.' + name] = value
def _clear(self):
- RecordWriter._clear(self)
+ super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
+ if sys.version_info >= (3, 0):
+ metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
+ if sys.version_info >= (3, 0):
+ body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
- write = self._ofile.write
- write(start_line)
- write(metadata)
- write(body)
+ self.write(start_line)
+ self.write(metadata)
+ self.write(body)
self._ofile.flush()
- self._flushed = False
+ self._flushed = True
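For context on the writer changes above: a chunk on the SCP v2 wire is a header line of the form `chunked 1.0,<metadata_length>,<body_length>`, followed by the JSON-encoded metadata and then the body, with both lengths counted in bytes after UTF-8 encoding on Python 3. A minimal, self-contained sketch of producing one such chunk (the helper name is illustrative, not part of the SDK)::

    import io
    import json

    def frame_chunk(metadata, body, ofile):
        """Write one chunk: header line, JSON metadata, then the body."""
        metadata_bytes = json.dumps(metadata).encode('utf-8') if metadata else b''
        body_bytes = body.encode('utf-8')
        header = 'chunked 1.0,%d,%d\n' % (len(metadata_bytes), len(body_bytes))
        ofile.write(header.encode('ascii'))
        ofile.write(metadata_bytes)
        ofile.write(body_bytes)
        ofile.flush()

    buffer = io.BytesIO()
    frame_chunk({'finished': True}, '', buffer)
    # buffer.getvalue() now starts with the header b'chunked 1.0,18,0\n'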
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/reporting_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/reporting_command.py
index 3d6b357..9470861 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/reporting_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/reporting_command.py
@@ -253,7 +253,7 @@ def fix_up(cls, command):
cls._requires_preop = False
return
- f = vars(command)[b'map'] # Function backing the map method
+ f = vars(command)['map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
@@ -266,7 +266,7 @@ def fix_up(cls, command):
# Create new StreamingCommand.ConfigurationSettings class
- module = command.__module__ + b'.' + command.__name__ + b'.map'
+ module = command.__module__ + '.' + command.__name__ + '.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/search_command.py
index 47918ab..dd11391 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/search_command.py
@@ -22,10 +22,7 @@
import io
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from copy import deepcopy
from splunklib.six.moves import StringIO
from itertools import chain, islice
@@ -124,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -172,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
record = Option(doc='''
**Syntax: record=<bool>**
@@ -256,7 +262,7 @@ def search_results_info(self):
invocation.
:return: Search results info:const:`None`, if the search results info file associated with the command
- invocation is inaccessible.
+ invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
@@ -338,6 +344,7 @@ def service(self):
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
+
enableheader = true
requires_srinfo = true
@@ -345,8 +352,8 @@ def service(self):
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
- :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
- of :code:`None` is returned.
+ :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
+ of :code:`None` is returned.
"""
if self._service is not None:
@@ -397,7 +404,7 @@ def flush(self):
:return: :const:`None`
"""
- self._record_writer.flush(partial=True)
+ self._record_writer.flush(finished=False)
def prepare(self):
""" Prepare for execution.
@@ -412,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -424,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -633,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -655,7 +681,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Reading metadata')
- metadata, body = self._read_chunk(ifile)
+ metadata, body = self._read_chunk(self._as_binary_stream(ifile))
action = getattr(metadata, 'action', None)
@@ -703,7 +729,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
self.fieldnames.append(str(result[0]))
else:
@@ -775,7 +801,6 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
- self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
@@ -809,15 +834,15 @@ def write_metric(self, name, value):
:param name: Name of the metric.
:type name: basestring
- :param value: A 4-tuple containing the value of metric :param:`name` where
+ :param value: A 4-tuple containing the value of metric ``name`` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
:return: :const:`None`.
@@ -832,6 +857,8 @@ def _decode_list(mv):
_encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
+ # Note: Subclasses must override this method so that it can be
+ # called as self._execute(ifile, None)
def _execute(self, ifile, process):
""" Default processing loop
@@ -845,21 +872,38 @@ def _execute(self, ifile, process):
:rtype: NoneType
"""
- self._record_writer.write_records(process(self._records(ifile)))
- self.finish()
+ if self.protocol_version == 1:
+ self._record_writer.write_records(process(self._records(ifile)))
+ self.finish()
+ else:
+ assert self._protocol_version == 2
+ self._execute_v2(ifile, process)
+
+ @staticmethod
+ def _as_binary_stream(ifile):
+ naught = ifile.read(0)
+ if isinstance(naught, bytes):
+ return ifile
+
+ try:
+ return ifile.buffer
+ except AttributeError as error:
+ raise RuntimeError('Failed to get underlying buffer: {}'.format(error))
@staticmethod
- def _read_chunk(ifile):
+ def _read_chunk(istream):
# noinspection PyBroadException
+ assert isinstance(istream.read(0), six.binary_type), 'Stream must be binary'
+
try:
- header = ifile.readline()
+ header = istream.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
- match = SearchCommand._header.match(header)
+ match = SearchCommand._header.match(six.ensure_str(header))
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
@@ -869,14 +913,14 @@ def _read_chunk(ifile):
body_length = int(body_length)
try:
- metadata = ifile.read(metadata_length)
+ metadata = istream.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
- metadata = decoder.decode(metadata)
+ metadata = decoder.decode(six.ensure_str(metadata))
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
@@ -886,16 +930,18 @@ def _read_chunk(ifile):
body = ""
try:
if body_length > 0:
- body = ifile.read(body_length)
+ body = istream.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
- return metadata, body
+ return metadata, six.ensure_str(body)
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
def _records_protocol_v1(self, ifile):
+ return self._read_csv_records(ifile)
+ def _read_csv_records(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
@@ -920,51 +966,37 @@ def _records_protocol_v1(self, ifile):
record[fieldname] = value
yield record
- def _records_protocol_v2(self, ifile):
+ def _execute_v2(self, ifile, process):
+ istream = self._as_binary_stream(ifile)
while True:
- result = self._read_chunk(ifile)
+ result = self._read_chunk(istream)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
-
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
- finished = getattr(metadata, 'finished', False)
+ self._finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
- if len(body) > 0:
- reader = csv.reader(StringIO(body), dialect=CsvDialect)
+ self._execute_chunk_v2(process, result)
- try:
- fieldnames = next(reader)
- except StopIteration:
- return
+ self._record_writer.write_chunk(finished=self._finished)
- mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
+ def _execute_chunk_v2(self, process, chunk):
+ metadata, body = chunk
- if len(mv_fieldnames) == 0:
- for values in reader:
- yield OrderedDict(izip(fieldnames, values))
- else:
- for values in reader:
- record = OrderedDict()
- for fieldname, value in izip(fieldnames, values):
- if fieldname.startswith('__mv_'):
- if len(value) > 0:
- record[mv_fieldnames[fieldname]] = self._decode_list(value)
- elif fieldname not in record:
- record[fieldname] = value
- yield record
-
- if finished:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
- self.flush()
+ records = self._read_csv_records(StringIO(body))
+ self._record_writer.write_records(process(records))
def _report_unexpected_error(self):
@@ -1035,6 +1067,8 @@ def fix_up(cls, command_class):
"""
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
@@ -1043,7 +1077,9 @@ def iteritems(self):
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
- items = iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
pass # endregion
@@ -1053,7 +1089,7 @@ def iteritems(self):
SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza `_ based on the value of
@@ -1076,11 +1112,13 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
#!/usr/bin/env python
@@ -1096,7 +1134,7 @@ def stream(records):
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@@ -1113,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
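To show how the new allow_empty_input flag reaches a command, here is a hypothetical script that dispatches a pass-through streaming command (the command itself is a placeholder, not part of this add-on); with allow_empty_input=False, _execute_chunk_v2 raises the ValueError shown above as soon as a chunk with an empty body arrives::

    #!/usr/bin/env python
    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class NoopCommand(StreamingCommand):
        """Passes records through unchanged; purely illustrative."""
        def stream(self, records):
            for record in records:
                yield record

    if __name__ == '__main__':
        # Fail fast on empty chunk bodies instead of processing nothing.
        dispatch(NoopCommand, sys.argv, sys.stdin, sys.stdout, __name__,
                 allow_empty_input=False)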
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/streaming_command.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/streaming_command.py
index 9d900c3..fa075ed 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/streaming_command.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/streaming_command.py
@@ -16,6 +16,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from splunklib import six
from splunklib.six.moves import map as imap, filter as ifilter
from .decorators import ConfigurationSetting
@@ -172,6 +173,8 @@ def fix_up(cls, command):
raise AttributeError('No StreamingCommand.stream override')
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -185,4 +188,8 @@ def iteritems(self):
lambda name_value1: (name_value1[0], 'stateful') if name_value1[0] == 'type' else (name_value1[0], name_value1[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
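The add_field and gen_record helpers added to SearchCommand above register extra output fields with the record writer, so they are included when the CSV header row is emitted. A sketch of the intended use from a hypothetical generating command (the field names are invented)::

    import time
    from splunklib.searchcommands import GeneratingCommand, Configuration

    @Configuration()
    class StatusCommand(GeneratingCommand):
        def generate(self):
            # gen_record registers '_time' and 'status' as custom fields on the writer.
            yield self.gen_record(_time=time.time(), status='ok')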
diff --git a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/validators.py b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/validators.py
index 6632937..22f0e16 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/searchcommands/validators.py
@@ -81,9 +81,9 @@ class Code(Validator):
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
- sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
- consists of a single interactive statement. In the latter case, expression statements that evaluate to
- something other than :const:`None` will be printed.
+ sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
+ consists of a single interactive statement. In the latter case, expression statements that evaluate to
+ something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
@@ -95,7 +95,9 @@ def __call__(self, value):
try:
return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- raise ValueError(error.message)
+ message = str(error)
+
+ six.raise_from(ValueError(message), error)
def format(self, value):
return None if value is None else value.source
@@ -199,6 +201,48 @@ def format(self, value):
return None if value is None else six.text_type(int(value))
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
+
+
class Duration(Validator):
""" Validates duration option values.
@@ -249,10 +293,10 @@ class List(Validator):
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
- delimiter = b','
- quotechar = b'"'
+ delimiter = str(',')
+ quotechar = str('"')
doublequote = True
- lineterminator = b'\n'
+ lineterminator = str('\n')
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
@@ -386,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
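The new Float validator plugs into Option exactly like Integer; a sketch of a bounded float option on a hypothetical command (the option name and bounds are invented for illustration)::

    from splunklib.searchcommands import Configuration, Option, StreamingCommand, validators

    @Configuration()
    class ScaleCommand(StreamingCommand):
        # Accepts e.g. scale=0.5; values outside [0.0, 1.0] raise a ValueError.
        scale = Option(default=1.0, validate=validators.Float(minimum=0.0, maximum=1.0))

        def stream(self, records):
            for record in records:
                record['scaled'] = float(record.get('value', 0)) * self.scale
                yield record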
diff --git a/bin/ta_dmarc/aob_py2/splunklib/six.py b/bin/ta_dmarc/aob_py2/splunklib/six.py
index 190c023..d13e50c 100644
--- a/bin/ta_dmarc/aob_py2/splunklib/six.py
+++ b/bin/ta_dmarc/aob_py2/splunklib/six.py
@@ -1,6 +1,4 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
+# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -20,6 +18,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+"""Utilities for writing code that runs on Python 2 and 3"""
+
from __future__ import absolute_import
import functools
@@ -29,7 +29,7 @@
import types
__author__ = "Benjamin Peterson "
-__version__ = "1.10.0"
+__version__ = "1.14.0"
# Useful for very coarse version differentiation.
@@ -241,6 +241,7 @@ class _MovedItems(_LazyModule):
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
@@ -254,18 +255,21 @@ class _MovedItems(_LazyModule):
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
@@ -337,10 +341,12 @@ class Module_six_moves_urllib_parse(_LazyModule):
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
@@ -416,6 +422,8 @@ class Module_six_moves_urllib_request(_LazyModule):
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
@@ -631,13 +639,16 @@ def u(s):
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
+ del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
@@ -659,6 +670,7 @@ def indexbytes(buf, i):
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
@@ -675,15 +687,23 @@ def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
@@ -699,19 +719,19 @@ def exec_(_code_, _globs_=None, _locs_=None):
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
""")
-if sys.version_info[:2] == (3, 2):
+if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
+ try:
+ raise value from from_value
+ finally:
+ value = None
""")
else:
def raise_from(value, from_value):
@@ -786,13 +806,33 @@ def print_(*args, **kwargs):
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
else:
wraps = functools.wraps
@@ -802,10 +842,22 @@ def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
- class metaclass(meta):
+ class metaclass(type):
def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
@@ -821,13 +873,73 @@ def wrapper(cls):
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
def python_2_unicode_compatible(klass):
"""
- A decorator that defines __unicode__ and __str__ methods under Python 2.
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
@@ -866,3 +978,16 @@ def python_2_unicode_compatible(klass):
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
+
+import warnings
+
+def deprecated(message):
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
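The ensure_binary/ensure_str/ensure_text helpers added to the vendored six are what binding.py and search_command.py rely on above for byte/text coercion; their behaviour is the same on both interpreters. A quick sketch (plain six usage, nothing add-on specific)::

    from splunklib import six

    header = b'chunked 1.0,18,0\n'
    assert six.ensure_str(header).startswith('chunked')    # bytes -> str on Python 3, str on Python 2
    assert six.ensure_binary('body') == b'body'             # text -> UTF-8 encoded bytes
    assert isinstance(six.ensure_text(b'caf\xc3\xa9'), six.text_type)  # bytes decoded to text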
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/__init__.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/__init__.py
index c437b0e..1f9fc68 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/__init__.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/__init__.py
@@ -15,6 +15,21 @@
"""Python library for Splunk."""
from __future__ import absolute_import
-from .six.moves import map
-__version_info__ = (1, 6, 6)
+from splunklib.six.moves import map
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
+
+__version_info__ = (1, 6, 20)
__version__ = ".".join(map(str, __version_info__))
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/binding.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/binding.py
index 8bfa28d..bb2771d 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/binding.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/binding.py
@@ -31,6 +31,7 @@
import socket
import ssl
import sys
+import time
from base64 import b64encode
from contextlib import contextmanager
from datetime import datetime
@@ -38,9 +39,8 @@
from io import BytesIO
from xml.etree.ElementTree import XML
-from . import six
-from .six import StringIO
-from .six.moves import urllib
+from splunklib import six
+from splunklib.six.moves import urllib
from .data import record
@@ -49,6 +49,7 @@
except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -70,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -80,6 +81,7 @@ def _parse_cookies(cookie_str, dictionary):
then updates the the dictionary with any key-value pairs found.
**Example**::
+
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
@@ -295,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -449,8 +450,16 @@ class Context(object):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param splunkToken: Splunk authentication token
+ :type splunkToken: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
@@ -468,7 +477,8 @@ class Context(object):
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
- cert_file=kwargs.get("cert_file")) # Default to False for backward compat
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
@@ -480,6 +490,7 @@ def __init__(self, handler=None, **kwargs):
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.basic = kwargs.get("basic", False)
+ self.bearerToken = kwargs.get("splunkToken", "")
self.autologin = kwargs.get("autologin", False)
self.additional_headers = kwargs.get("headers", [])
@@ -496,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+ """Returns true if the ``HttpLib`` member of this instance has auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+ :return: ``True`` if there is auth token present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -520,6 +531,9 @@ def _auth_headers(self):
elif self.basic and (self.username and self.password):
token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii')
return [("Authorization", token)]
+ elif self.bearerToken:
+ token = 'Bearer %s' % self.bearerToken
+ return [("Authorization", token)]
elif self.token is _NoAuthenticationToken:
return []
else:
@@ -611,7 +625,7 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@@ -674,7 +688,7 @@ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.get(path, all_headers, **query)
return response
@@ -717,7 +731,12 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
- :type query: ``string``
+ :param body: Parameters to be used in the post body. If specified,
+ any parameters in the query will be applied to the URL instead of
+ the body. If a dict is supplied, the key-value pairs will be form
+ encoded. If a string is supplied, the body will be passed through
+ in the request unchanged.
+ :type body: ``dict`` or ``str``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -747,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -783,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -814,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
+
all_headers = headers + self.additional_headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
@@ -862,6 +899,10 @@ def login(self):
# as credentials were passed in.
return
+ if self.bearerToken:
+ # Bearer auth mode requested, so this method is a nop as long
+ # as authentication token was passed in.
+ return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
@@ -1004,7 +1045,7 @@ class HTTPError(Exception):
def __init__(self, response, _message=None):
status = response.status
reason = response.reason
- body = (response.body.read()).decode()
+ body = response.body.read()
try:
detail = XML(body).findtext("./messages/msg")
except ParseError as err:
@@ -1054,7 +1095,7 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
@@ -1121,12 +1162,14 @@ class HttpLib(object):
If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None):
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
if custom_handler is None:
- self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file)
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
else:
self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1212,6 +1255,8 @@ def post(self, url, headers=None, **kwargs):
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
body = kwargs.pop('body')
+ if isinstance(body, dict):
+ body = _encode(**body).encode('utf-8')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
@@ -1238,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1274,7 +1328,10 @@ def __init__(self, response, connection=None):
self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
@@ -1333,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None, verify=False):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1345,6 +1402,8 @@ def handler(key_file=None, cert_file=None, timeout=None, verify=False):
:type timeout: ``integer`` or "None"
:param `verify`: Set to False to disable SSL verification on https connections.
:type verify: ``Boolean``
+ :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True is enabled and a context is specified
+ :type context: ``SSLContext``
"""
def connect(scheme, host, port):
@@ -1356,9 +1415,12 @@ def connect(scheme, host, port):
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
- # If running Python 2.7.9+, disable SSL certificate validation
- if (sys.version_info >= (2,7,9) and key_file is None and cert_file is None) and not verify:
+ if not verify:
kwargs['context'] = ssl._create_unverified_context()
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
@@ -1368,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.6",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
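Taken together, the binding changes above add bearer-token authentication, an optional SSLContext, and a simple retry loop around each HTTP request. A hedged sketch of a caller exercising them (host, token and counts are placeholders)::

    import splunklib.client as client

    service = client.connect(
        host='localhost', port=8089, scheme='https',
        splunkToken='<your-authentication-token>',   # sent as 'Authorization: Bearer ...'
        retries=3,          # retry each failed HTTP call up to 3 times
        retryDelay=10,      # wait 10 seconds between attempts
        verify=False)       # or verify=True together with context=<ssl.SSLContext>
    print(service.info['version'])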
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/client.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/client.py
index cb04093..35d9e4f 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/client.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/client.py
@@ -66,8 +66,8 @@
from datetime import datetime, timedelta
from time import sleep
-from . import six
-from .six.moves import urllib
+from splunklib import six
+from splunklib.six.moves import urllib
from . import data
from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded,
@@ -75,6 +75,8 @@
namespace)
from .data import record
+logger = logging.getLogger(__name__)
+
__all__ = [
"connect",
"NotSupportedError",
@@ -224,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
@@ -295,7 +300,7 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
@@ -318,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -365,7 +377,7 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
@@ -384,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -401,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -463,6 +481,13 @@ def info(self):
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
+ def input(self, path, kind=None):
+ """Retrieves an input by path, and optionally kind.
+
+ :return: A :class:`Input` object.
+ """
+ return Input(self, path, kind=kind).refresh()
+
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
@@ -666,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+ By default, if the kvstore owner is not set, it will return "nobody"
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ kvstore is refreshed when the owner value is changed
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+ Sets the owner for the namespace before retrieving the KVStore Collection.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
@@ -692,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -750,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -810,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -821,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -946,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1052,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1200,7 +1241,7 @@ def __getitem__(self, key):
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
- *Example*::
+ **Example**::
s = client.connect(...)
saved_searches = s.saved_searches
@@ -1437,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -1636,9 +1677,9 @@ def get(self, name="", owner=None, app=None, sharing=None, **query):
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
- Example:
+ **Example**::
- import splunklib.client
+ import splunklib.client
s = client.service(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
@@ -1865,7 +1906,7 @@ def delete(self, username, realm=None):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
@@ -2079,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2510,9 +2547,9 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
@@ -2730,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2741,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2771,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2934,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5", output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2995,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5", output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3150,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3569,7 +3603,7 @@ class KVStoreCollection(Entity):
def data(self):
"""Returns data object for this Collection.
- :rtype: :class:`KVStoreData`
+ :rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
@@ -3584,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3612,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3633,6 +3667,11 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
@@ -3645,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8'))
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3657,6 +3696,8 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
+ if isinstance(data, dict):
+ data = json.dumps(data)
return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
@@ -3679,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3693,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/data.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/data.py
index c29063d..f9ffb86 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/data.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/data.py
@@ -19,7 +19,7 @@
from __future__ import absolute_import
import sys
from xml.etree.ElementTree import XML
-from . import six
+from splunklib import six
__all__ = ["load"]
@@ -161,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/argument.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/argument.py
index 4c4b3c8..04214d1 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/argument.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/argument.py
@@ -54,9 +54,9 @@ def __init__(self, name, description=None, validation=None,
:param name: ``string``, identifier for this argument in Splunk.
:param description: ``string``, human-readable description of the argument.
:param validation: ``string`` specifying how the argument should be validated, if using internal validation.
- If using external validation, this will be ignored.
+ If using external validation, this will be ignored.
:param data_type: ``string``, data type of this field; use the class constants.
- "data_type_boolean", "data_type_number", or "data_type_string".
+ "data_type_boolean", "data_type_number", or "data_type_string".
:param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind.
:param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind.
:param title: ``String``, a human-readable title for the argument.
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event.py
index f840432..9cd6cf3 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event.py
@@ -13,6 +13,9 @@
# under the License.
from __future__ import absolute_import
+from io import TextIOBase
+from splunklib.six import ensure_text
+
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -104,5 +107,8 @@ def write_to(self, stream):
if self.done:
ET.SubElement(event, "done")
- stream.write(ET.tostring(event).decode())
+ if isinstance(stream, TextIOBase):
+ stream.write(ensure_text(ET.tostring(event)))
+ else:
+ stream.write(ET.tostring(event))
stream.flush()
\ No newline at end of file
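# --- Illustrative sketch; not part of this patch. ---
# Event.write_to() now inspects the destination: io.TextIOBase streams receive
# the serialized event as text (via six.ensure_text), any other stream gets the
# raw bytes from ET.tostring(). The stanza name below is a placeholder.
import io
from splunklib.modularinput import Event

event = Event(data='hello world', stanza='example://input_one', sourcetype='example')

event.write_to(io.StringIO())   # TextIOBase subclass -> str is written
event.write_to(io.BytesIO())    # binary stream -> bytes are written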
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index d8a2a2e..5f8c5aa
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/event_writer.py
@@ -15,12 +15,16 @@
from __future__ import absolute_import
import sys
+from splunklib.six import ensure_str
from .event import ET
+try:
+ from splunklib.six.moves import cStringIO as StringIO
+except ImportError:
+ from splunklib.six import StringIO
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
-
Its two important methods are ``writeEvent``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
@@ -64,7 +68,7 @@ def log(self, severity, message):
:param message: ``string``, message to log.
"""
- self._err.write(("%s %s\n" % (severity, message)))
+ self._err.write("%s %s\n" % (severity, message))
self._err.flush()
def write_xml_document(self, document):
@@ -73,12 +77,11 @@ def write_xml_document(self, document):
:param document: An ``ElementTree`` object.
"""
- try:
- self._out.write(ET.tostring(document))
- except:
- self._out.write(ET.tostring(document, encoding="unicode"))
+ self._out.write(ensure_str(ET.tostring(document)))
self._out.flush()
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write("</stream>")
+ if self.header_written:
+ self._out.write("</stream>")
+ self._out.flush()
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/scheme.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/scheme.py
index ff4f978..4104e4a 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/scheme.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/scheme.py
@@ -55,7 +55,7 @@ def add_argument(self, arg):
def to_xml(self):
"""Creates an ``ET.Element`` representing self, then returns it.
- :returns root, an ``ET.Element`` representing this scheme.
+ :returns: an ``ET.Element`` representing this scheme.
"""
root = ET.Element("scheme")
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/script.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/script.py
index 86484ec..8595dc4 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/script.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/script.py
@@ -14,14 +14,14 @@
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
-from ..six.moves.urllib.parse import urlsplit
+from splunklib.six.moves.urllib.parse import urlsplit
import sys
from ..client import Service
from .event_writer import EventWriter
from .input_definition import InputDefinition
from .validation_definition import ValidationDefinition
-from .. import six
+from splunklib import six
try:
import xml.etree.cElementTree as ET
@@ -105,8 +105,7 @@ def run_script(self, args, event_writer, input_stream):
return 1
except Exception as e:
- err_string = EventWriter.ERROR + str(e)
- event_writer._err.write(err_string)
+ event_writer.log(EventWriter.ERROR, str(e))
return 1
@property
@@ -118,9 +117,9 @@ def service(self):
available as soon as the :code:`Script.stream_events` method is
called.
- :return: :class:splunklib.client.Service. A value of None is returned,
- if you call this method before the :code:`Script.stream_events` method
- is called.
+ :return: :class:`splunklib.client.Service`. A value of None is returned,
+ if you call this method before the :code:`Script.stream_events` method
+ is called.
"""
if self._service is not None:
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/utils.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/utils.py
index 47488dc..3d42b63 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/utils.py
@@ -15,7 +15,7 @@
# File for utility functions
from __future__ import absolute_import
-from ..six.moves import zip
+from splunklib.six.moves import zip
def xml_compare(expected, found):
"""Checks equality of two ``ElementTree`` objects.
@@ -64,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
+ data[child_name] = parse_parameters(child)
return data
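# --- Illustrative sketch; not part of this patch. ---
# With the change above, every <stanza> entry now carries the owning app under
# the reserved "__app" key (None when the attribute is missing). Names below
# are placeholders.
import xml.etree.ElementTree as ET
from splunklib.modularinput.utils import parse_xml_data

conf = ET.fromstring(
    '<configuration>'
    '<stanza name="example://one" app="search">'
    '<param name="interval">60</param>'
    '</stanza>'
    '</configuration>')

parsed = parse_xml_data(conf, "stanza")
# parsed == {'example://one': {'__app': 'search', 'interval': '60'}}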
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/validation_definition.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/validation_definition.py
index 8904e40..3bbe976 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/validation_definition.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/modularinput/validation_definition.py
@@ -28,7 +28,7 @@ class ValidationDefinition(object):
**Example**::
- ``v = ValidationDefinition()``
+ v = ValidationDefinition()
"""
def __init__(self):
@@ -46,23 +46,25 @@ def parse(stream):
The XML typically will look like this:
- ````
- `` myHost``
- `` https://127.0.0.1:8089``
- `` 123102983109283019283``
- `` /opt/splunk/var/lib/splunk/modinputs``
- ``
- ``
- `` value1``
- `` ``
- `` value2``
- `` value3``
- `` value4``
- `` ``
- ``
``
- ````
+ .. code-block:: xml
+
+     <items>
+       <server_host>myHost</server_host>
+       <server_uri>https://127.0.0.1:8089</server_uri>
+       <session_key>123102983109283019283</session_key>
+       <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
+       <item name="myScheme">
+         <param name="param1">value1</param>
+         <param_list name="param2">
+           <value>value2</value>
+           <value>value3</value>
+           <value>value4</value>
+         </param_list>
+       </item>
+     </items>
:param stream: ``Stream`` containing XML to parse.
- :return definition: A ``ValidationDefinition`` object.
+ :return: A ``ValidationDefinition`` object.
"""
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/results.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/results.py
index 2a03a4f..8543ab0 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/results.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -34,29 +34,32 @@
from __future__ import absolute_import
-from io import BytesIO
+from io import BufferedReader, BytesIO
+
+from splunklib import six
+
+from splunklib.six import deprecated
-from . import six
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
- from .six.moves import cStringIO as StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except:
- from .six import StringIO
+ from splunklib.six import StringIO
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -67,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -80,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -92,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -110,6 +116,7 @@ def read(self, n=None):
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -123,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("")
assert s.read() == ""
"""
+
def __init__(self, stream):
self.stream = stream
@@ -153,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjunction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -180,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -260,16 +271,16 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, six.string_types) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
values.append(text)
@@ -291,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+ __next__ = next
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
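# --- Illustrative usage sketch; not part of this patch. ---
# ResultsReader is now deprecated; request JSON from the endpoint and hand the
# stream to JSONResultsReader. Host and credentials below are placeholders.
import splunklib.client as client
import splunklib.results as results

service = client.connect(host='localhost', port=8089,
                         username='admin', password='changeme')
stream = service.jobs.oneshot('search index=_internal | head 5', output_mode='json')

for item in results.JSONResultsReader(stream):
    if isinstance(item, results.Message):
        print('%s: %s' % (item.type, item.message))   # diagnostic message
    else:
        print(item)                                    # an ordinary result dict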
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/__init__.py
index 12b14f3..8a92903 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/__init__.py
@@ -30,7 +30,7 @@
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
It does not show that :code:`field-name` values may be comma-separated. This is because Splunk strips commas from
- the command line. A search command will never see them.
+ the command line. A search command will never see them.
2. Search commands targeting versions of Splunk prior to 6.3 must be statically configured as follows:
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `_
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure search assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/decorators.py
index 5ef92f7..d8b3f48 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/decorators.py
@@ -15,15 +15,12 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
-from .. import six
+from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
-from ..six.moves import map as imap
+from splunklib.six.moves import map as imap
from .internals import ConfigurationSettingsType, json_encode_string
from .validators import OptionName
@@ -36,7 +33,7 @@ class Configuration(object):
variable to search command classes that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive :code:`name` the word "Command" is removed
from the end of the class name and then converted to lower case for conformance with the `Search command style guide
- `_
+ `__
"""
def __init__(self, o=None, **kwargs):
@@ -229,8 +226,9 @@ class Option(property):
Short form (recommended). When you are satisfied with built-in or custom validation behaviors.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands.decorators import Option
from splunklib.searchcommands.validators import Fieldname
@@ -247,8 +245,9 @@ class Option(property):
also provide a deleter. You must be prepared to accept a value of :const:`None` which indicates that your
:code:`Option` is unset.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands import Option
@Option()
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/environment.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/environment.py
index 6773e39..e92018f 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/environment.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/environment.py
@@ -19,7 +19,7 @@
from logging import getLogger, root, StreamHandler
from logging.config import fileConfig
from os import chdir, environ, path
-from ..six.moves import getcwd
+from splunklib.six.moves import getcwd
import sys
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/eventing_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/eventing_command.py
index 147c871..27dc13a 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/eventing_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/eventing_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from ..six.moves import map as imap
+from splunklib import six
+from splunklib.six.moves import map as imap
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -135,8 +136,14 @@ def fix_up(cls, command):
raise AttributeError('No EventingCommand.transform override')
SearchCommand.ConfigurationSettings.fix_up(command)
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
return imap(lambda name_value: (name_value[0], 'events' if name_value[0] == 'type' else name_value[1]), iteritems)
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/external_search_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/external_search_command.py
index 989c4aa..c230624 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/external_search_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/external_search_command.py
@@ -20,7 +20,7 @@
import os
import sys
import traceback
-from .. import six
+from splunklib import six
if sys.platform == 'win32':
from signal import signal, CTRL_BREAK_EVENT, SIGBREAK, SIGINT, SIGTERM
@@ -105,13 +105,13 @@ def _execute(path, argv=None, environ=None):
:param argv: Argument list.
:type argv: list or tuple
- The arguments to the child process should start with the name of the command being run, but this is not
- enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
+ The arguments to the child process should start with the name of the command being run, but this is not
+ enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
:param environ: A mapping which is used to define the environment variables for the new process.
:type environ: dict or None.
- This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
- the :data:`os.environ` mapping should be used.
+ This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
+ the :data:`os.environ` mapping should be used.
:return: None
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/generating_command.py
index 2f97300..6a75d2c 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/generating_command.py
@@ -15,11 +15,13 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
-from ..six.moves import map as imap, filter as ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
# P1 [O] TODO: Discuss generates_timeorder in the class-level documentation for GeneratingCommand
@@ -92,9 +94,10 @@ class StreamingGeneratingCommand(GeneratingCommand)
+==========+===================================================+===================================================+
| streams | 1. Add this line to your command's stanza in | 1. Add this configuration setting to your code: |
| | | |
- | | default/commands.conf. | .. code-block:: python |
- | | .. code-block:: python | @Configuration(distributed=True) |
- | | local = false | class SomeCommand(GeneratingCommand) |
+ | | default/commands.conf:: | .. code-block:: python |
+ | | | |
+ | | local = false | @Configuration(distributed=True) |
+ | | | class SomeCommand(GeneratingCommand) |
| | | ... |
| | 2. Restart splunk | |
| | | 2. You are good to go; no need to restart Splunk |
@@ -112,6 +115,7 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration( | @Configuration(type='events') |
| | retainsevents=True, streaming=False) | class SomeCommand(GeneratingCommand) |
| | class SomeCommand(GeneratingCommand) | ... |
@@ -119,22 +123,25 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = true | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = true | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='events', retainsevents=True, streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: python
+
+ retainsevents = false
streaming = false
Reporting Generating command
@@ -149,28 +156,32 @@ class SomeCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration(retainsevents=False) | @Configuration(type='reporting') |
| | class SomeCommand(GeneratingCommand) | class SomeCommand(GeneratingCommand) |
| | ... | ... |
| | | |
| | Or add this lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = false | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = false | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='reporting', streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: text
+
+ retainsevents = false
streaming = false
"""
@@ -194,19 +205,57 @@ def _execute(self, ifile, process):
"""
if self._protocol_version == 2:
- result = self._read_chunk(ifile)
+ self._execute_v2(ifile, self.generate())
+ else:
+ assert self._protocol_version == 1
+ self._record_writer.write_records(self.generate())
+ self.finish()
- if not result:
- return
+ def _execute_chunk_v2(self, process, chunk):
+ count = 0
+ records = []
+ for row in process:
+ records.append(row)
+ count += 1
+ if count == self._record_writer._maxresultrows:
+ break
- metadata, body = result
- action = getattr(metadata, 'action', None)
+ for row in records:
+ self._record_writer.write_record(row)
- if action != 'execute':
- raise RuntimeError('Expected execute action, not {}'.format(action))
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
- self._record_writer.write_records(self.generate())
- self.finish()
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands, it must be true. Doing otherwise will cause an error.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command being run in a search,
+ # also this class implements its own separate _execute_chunk_v2 method which does not respect allow_empty_input
+ # so ensure that allow_empty_input is always True
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
# endregion
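# --- Illustrative sketch; not part of this patch. ---
# A generating command runs at the head of a search with no input records, so
# the process() override above insists on allow_empty_input=True before
# delegating to SearchCommand.process(). Command and field names are placeholders.
import sys
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option
from splunklib.searchcommands.validators import Integer

@Configuration()
class GenerateHelloCommand(GeneratingCommand):
    count = Option(require=True, validate=Integer(minimum=1))

    def generate(self):
        for i in range(1, self.count + 1):
            yield {'_serial': i, '_raw': 'hello %d' % i}

if __name__ == '__main__':
    dispatch(GenerateHelloCommand, sys.argv, sys.stdin, sys.stdout, __name__)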
@@ -315,6 +364,8 @@ def fix_up(cls, command):
if command.generate == GeneratingCommand.generate:
raise AttributeError('No GeneratingCommand.generate override')
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -325,6 +376,10 @@ def iteritems(self):
lambda name_value: (name_value[0], 'stateful') if name_value[0] == 'type' else (name_value[0], name_value[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass
# endregion
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/internals.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/internals.py
index 8b76fc2..1ea2833 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/internals.py
@@ -16,45 +16,63 @@
from __future__ import absolute_import, division, print_function
+from io import TextIOWrapper
from collections import deque, namedtuple
-from .. import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
-from ..six.moves import StringIO
+from splunklib import six
+from collections import OrderedDict
+from splunklib.six.moves import StringIO
from itertools import chain
-from ..six.moves import map as imap
+from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
-from ..six.moves import urllib
+from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
+import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
-if sys.platform == 'win32':
- # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
- # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
- # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
- from platform import python_implementation
- implementation = python_implementation()
- fileno = sys.stdout.fileno()
- if implementation == 'PyPy':
- sys.stdout = os.fdopen(fileno, 'wb', 0)
- else:
- from msvcrt import setmode
- setmode(fileno, os.O_BINARY)
+
+def set_binary_mode(fh):
+ """ Helper method to set up binary mode for file handles.
+ Emphasis being sys.stdin, sys.stdout, sys.stderr.
+ For python3, we want to return .buffer
+ For python2+windows we want to set os.O_BINARY
+ """
+ typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
+ # check for file handle
+ if not isinstance(fh, typefile):
+ return fh
+
+ # check for python3 and buffer
+ if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
+ return fh.buffer
+ # check for python3
+ elif sys.version_info >= (3, 0):
+ pass
+ # check for windows python2. SPL-175233 -- python3 stdout is already binary
+ elif sys.platform == 'win32':
+ # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
+ # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
+ # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
+ from platform import python_implementation
+ implementation = python_implementation()
+ if implementation == 'PyPy':
+ return os.fdopen(fh.fileno(), 'wb', 0)
+ else:
+ import msvcrt
+ msvcrt.setmode(fh.fileno(), os.O_BINARY)
+ return fh
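# --- Illustrative sketch; not part of this patch. ---
# set_binary_mode() is what RecordWriter now runs its output handle through:
# on Python 3 a text handle such as sys.stdout is swapped for its .buffer, on
# Python 2 + Windows the underlying fd is switched to O_BINARY, and handles
# that are already binary (e.g. io.BytesIO) pass straight through.
import io
import sys

binary_out = set_binary_mode(sys.stdout)   # Python 3: returns sys.stdout.buffer
raw = set_binary_mode(io.BytesIO())        # already binary: returned unchanged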
class CommandLineParser(object):
- """ Parses the arguments to a search command.
+ r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
@@ -212,7 +230,7 @@ def replace(match):
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
- _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
+ _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"\\])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
@@ -339,6 +357,8 @@ class CsvDialect(csv.Dialect):
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
+ if sys.version_info >= (3, 0) and sys.platform == 'win32':
+ lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
@@ -346,6 +366,7 @@ class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
+
def __str__(self):
return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
@@ -373,7 +394,8 @@ def read(self, ifile):
# continuation of the current item
value += urllib.parse.unquote(line)
- if name is not None: self[name] = value[:-1] if value[-1] == '\n' else value
+ if name is not None:
+ self[name] = value[:-1] if value[-1] == '\n' else value
Message = namedtuple('Message', ('type', 'text'))
@@ -470,7 +492,7 @@ class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
- self._ofile = ofile
+ self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
@@ -481,8 +503,9 @@ def __init__(self, ofile, maxresultrows=None):
self._inspector = OrderedDict()
self._chunk_count = 0
- self._record_count = 0
- self._total_record_count = 0
+ self._pending_record_count = 0
+ self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -498,7 +521,37 @@ def ofile(self):
@ofile.setter
def ofile(self, value):
- self._ofile = value
+ self._ofile = set_binary_mode(value)
+
+ @property
+ def pending_record_count(self):
+ return self._pending_record_count
+
+ @property
+ def _record_count(self):
+ warnings.warn(
+ "_record_count will be deprecated soon. Use pending_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.pending_record_count
+
+ @property
+ def committed_record_count(self):
+ return self._committed_record_count
+
+ @property
+ def _total_record_count(self):
+ warnings.warn(
+ "_total_record_count will be deprecated soon. Use committed_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.committed_record_count
+
+ def write(self, data):
+ bytes_type = bytes if sys.version_info >= (3, 0) else str
+ if not isinstance(data, bytes_type):
+ data = data.encode('utf-8')
+ self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
@@ -517,6 +570,7 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
@@ -525,8 +579,7 @@ def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
- self._record_count = 0
- self._flushed = False
+ self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
@@ -539,6 +592,7 @@ def _write_record(self, record):
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
@@ -580,7 +634,7 @@ def _write_record(self, record):
value = str(value.real)
elif value_t is six.text_type:
value = value
- elif value_t is int or value_t is int or value_t is float or value_t is complex:
+ elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
@@ -610,7 +664,7 @@ def _write_record(self, record):
values += (value, None)
continue
- if value_t is int or value_t is int or value_t is float or value_t is complex:
+ if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
@@ -621,9 +675,9 @@ def _write_record(self, record):
values += (repr(value), None)
self._writerow(values)
- self._record_count += 1
+ self._pending_record_count += 1
- if self._record_count >= self._maxresultrows:
+ if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
@@ -660,10 +714,9 @@ def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- if self._record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
+ if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
- write = self._ofile.write
if self._chunk_count == 0:
@@ -675,12 +728,12 @@ def flush(self, finished=None, partial=None):
message_level = RecordWriterV1._message_level.get
for level, text in messages:
- write(message_level(level, level))
- write('=')
- write(text)
- write('\r\n')
+ self.write(message_level(level, level))
+ self.write('=')
+ self.write(text)
+ self.write('\r\n')
- write('\r\n')
+ self.write('\r\n')
elif messages is not None:
@@ -698,10 +751,10 @@ def flush(self, finished=None, partial=None):
for level, text in messages:
print(level, text, file=stderr)
- write(self._buffer.getvalue())
- self._clear()
+ self.write(self._buffer.getvalue())
self._chunk_count += 1
- self._total_record_count += self._record_count
+ self._committed_record_count += self.pending_record_count
+ self._clear()
self._finished = finished is True
@@ -719,44 +772,43 @@ class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- inspector = self._inspector
-
- if self._flushed is False:
-
- self._total_record_count += self._record_count
- self._chunk_count += 1
-
- # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
- # ChunkedExternProcessor (See SPL-103525)
- #
- # We will need to replace the following block of code with this block:
- #
- # metadata = [
- # ('inspector', self._inspector if len(self._inspector) else None),
- # ('finished', finished),
- # ('partial', partial)]
- if len(inspector) == 0:
- inspector = None
-
- if partial is True:
- finished = False
-
- metadata = [item for item in (('inspector', inspector), ('finished', finished))]
- self._write_chunk(metadata, self._buffer.getvalue())
- self._clear()
+ if partial or not finished:
+ # Don't flush partial chunks, since the SCP v2 protocol does not
+ # provide a way to send partial chunks yet.
+ return
- elif finished is True:
- self._write_chunk((('finished', True),), '')
+ if not self.is_flushed:
+ self.write_chunk(finished=True)
- self._finished = finished is True
+ def write_chunk(self, finished=None):
+ inspector = self._inspector
+ self._committed_record_count += self.pending_record_count
+ self._chunk_count += 1
+
+ # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
+ # ChunkedExternProcessor (See SPL-103525)
+ #
+ # We will need to replace the following block of code with this block:
+ #
+ # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
+ #
+ # if partial is True:
+ # finished = False
+
+ if len(inspector) == 0:
+ inspector = None
+
+ metadata = [item for item in (('inspector', inspector), ('finished', finished))]
+ self._write_chunk(metadata, self._buffer.getvalue())
+ self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
- self._ofile.write('\n')
+ self.write('\n')
self._clear()
def write_metric(self, name, value):
@@ -764,26 +816,29 @@ def write_metric(self, name, value):
self._inspector['metric.' + name] = value
def _clear(self):
- RecordWriter._clear(self)
+ super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
+ if sys.version_info >= (3, 0):
+ metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
+ if sys.version_info >= (3, 0):
+ body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
- write = self._ofile.write
- write(start_line)
- write(metadata)
- write(body)
+ self.write(start_line)
+ self.write(metadata)
+ self.write(body)
self._ofile.flush()
- self._flushed = False
+ self._flushed = True
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/reporting_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/reporting_command.py
index b9fb2af..9470861 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/reporting_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/reporting_command.py
@@ -23,7 +23,7 @@
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
-from .. import six
+from splunklib import six
class ReportingCommand(SearchCommand):
@@ -253,7 +253,7 @@ def fix_up(cls, command):
cls._requires_preop = False
return
- f = vars(command)[b'map'] # Function backing the map method
+ f = vars(command)['map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
@@ -266,7 +266,7 @@ def fix_up(cls, command):
# Create new StreamingCommand.ConfigurationSettings class
- module = command.__module__ + b'.' + command.__name__ + b'.map'
+ module = command.__module__ + '.' + command.__name__ + '.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/search_command.py
index 965e894..dd11391 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/search_command.py
@@ -22,15 +22,12 @@
import io
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from copy import deepcopy
-from ..six.moves import StringIO
+from splunklib.six.moves import StringIO
from itertools import chain, islice
-from ..six.moves import filter as ifilter, map as imap, zip as izip
-from .. import six
+from splunklib.six.moves import filter as ifilter, map as imap, zip as izip
+from splunklib import six
if six.PY2:
from logging import _levelNames, getLevelName, getLogger
else:
@@ -41,8 +38,8 @@
# Used for recording, skip on python 2.6
pass
from time import time
-from ..six.moves.urllib.parse import unquote
-from ..six.moves.urllib.parse import urlsplit
+from splunklib.six.moves.urllib.parse import unquote
+from splunklib.six.moves.urllib.parse import urlsplit
from warnings import warn
from xml.etree import ElementTree
@@ -124,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -172,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
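# --- Illustrative sketch; not part of this patch. ---
# add_field() and gen_record() register extra field names with the record
# writer's custom_fields set, so late-added fields still make it into the CSV
# header row. The command class below is a placeholder.
from splunklib.searchcommands import StreamingCommand, Configuration

@Configuration()
class TagCommand(StreamingCommand):
    def stream(self, records):
        for record in records:
            self.add_field(record, 'tagged', 'yes')   # 'tagged' is registered as a custom field
            yield record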
record = Option(doc='''
**Syntax: record=
@@ -256,7 +262,7 @@ def search_results_info(self):
invocation.
:return: Search results info:const:`None`, if the search results info file associated with the command
- invocation is inaccessible.
+ invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
@@ -338,6 +344,7 @@ def service(self):
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
+
enableheader = true
requires_srinfo = true
@@ -345,8 +352,8 @@ def service(self):
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
- :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
- of :code:`None` is returned.
+ :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
+ of :code:`None` is returned.
"""
if self._service is not None:
@@ -397,7 +404,7 @@ def flush(self):
:return: :const:`None`
"""
- self._record_writer.flush(partial=True)
+ self._record_writer.flush(finished=False)
def prepare(self):
""" Prepare for execution.
@@ -412,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -424,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -633,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
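# --- Illustrative sketch; not part of this patch. ---
# Subclasses may override _protocol_v2_option_parser() to change how SCP v2
# arguments are split. Returning a single-element list makes the argument a
# positional fieldname even when it contains '='. Placeholder class name.
class RawFieldnamesCommand(SearchCommand):
    def _protocol_v2_option_parser(self, arg):
        return [arg]   # never split: treat every argument as a fieldname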
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -655,7 +681,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Reading metadata')
- metadata, body = self._read_chunk(ifile)
+ metadata, body = self._read_chunk(self._as_binary_stream(ifile))
action = getattr(metadata, 'action', None)
@@ -703,7 +729,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
self.fieldnames.append(str(result[0]))
else:
@@ -775,7 +801,6 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
- self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
@@ -809,15 +834,15 @@ def write_metric(self, name, value):
:param name: Name of the metric.
:type name: basestring
- :param value: A 4-tuple containing the value of metric :param:`name` where
+ :param value: A 4-tuple containing the value of metric ``name`` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
:return: :const:`None`.
@@ -832,6 +857,8 @@ def _decode_list(mv):
_encoded_value = re.compile(r'\$(?P- (?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
+ # Note: Subclasses must override this method so that it can be called
+ # called as self._execute(ifile, None)
def _execute(self, ifile, process):
""" Default processing loop
@@ -845,21 +872,38 @@ def _execute(self, ifile, process):
:rtype: NoneType
"""
- self._record_writer.write_records(process(self._records(ifile)))
- self.finish()
+ if self.protocol_version == 1:
+ self._record_writer.write_records(process(self._records(ifile)))
+ self.finish()
+ else:
+ assert self._protocol_version == 2
+ self._execute_v2(ifile, process)
+
+ @staticmethod
+ def _as_binary_stream(ifile):
+ naught = ifile.read(0)
+ if isinstance(naught, bytes):
+ return ifile
+
+ try:
+ return ifile.buffer
+ except AttributeError as error:
+ raise RuntimeError('Failed to get underlying buffer: {}'.format(error))
@staticmethod
- def _read_chunk(ifile):
+ def _read_chunk(istream):
# noinspection PyBroadException
+ assert isinstance(istream.read(0), six.binary_type), 'Stream must be binary'
+
try:
- header = ifile.readline()
+ header = istream.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
- match = SearchCommand._header.match(header)
+ match = SearchCommand._header.match(six.ensure_str(header))
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
@@ -869,14 +913,14 @@ def _read_chunk(ifile):
body_length = int(body_length)
try:
- metadata = ifile.read(metadata_length)
+ metadata = istream.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
- metadata = decoder.decode(metadata)
+ metadata = decoder.decode(six.ensure_str(metadata))
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
@@ -886,16 +930,18 @@ def _read_chunk(ifile):
body = ""
try:
if body_length > 0:
- body = ifile.read(body_length)
+ body = istream.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
- return metadata, body
+ return metadata, six.ensure_str(body)
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
def _records_protocol_v1(self, ifile):
+ return self._read_csv_records(ifile)
+ def _read_csv_records(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
@@ -920,51 +966,37 @@ def _records_protocol_v1(self, ifile):
record[fieldname] = value
yield record
- def _records_protocol_v2(self, ifile):
+ def _execute_v2(self, ifile, process):
+ istream = self._as_binary_stream(ifile)
while True:
- result = self._read_chunk(ifile)
+ result = self._read_chunk(istream)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
-
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
- finished = getattr(metadata, 'finished', False)
+ self._finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
- if len(body) > 0:
- reader = csv.reader(StringIO(body), dialect=CsvDialect)
+ self._execute_chunk_v2(process, result)
- try:
- fieldnames = next(reader)
- except StopIteration:
- return
+ self._record_writer.write_chunk(finished=self._finished)
- mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
+ def _execute_chunk_v2(self, process, chunk):
+ metadata, body = chunk
- if len(mv_fieldnames) == 0:
- for values in reader:
- yield OrderedDict(izip(fieldnames, values))
- else:
- for values in reader:
- record = OrderedDict()
- for fieldname, value in izip(fieldnames, values):
- if fieldname.startswith('__mv_'):
- if len(value) > 0:
- record[mv_fieldnames[fieldname]] = self._decode_list(value)
- elif fieldname not in record:
- record[fieldname] = value
- yield record
-
- if finished:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
- self.flush()
+ records = self._read_csv_records(StringIO(body))
+ self._record_writer.write_records(process(records))
def _report_unexpected_error(self):
@@ -1035,6 +1067,8 @@ def fix_up(cls, command_class):
"""
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
@@ -1043,7 +1077,9 @@ def iteritems(self):
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
- items = iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
pass # endregion
@@ -1053,7 +1089,7 @@ def iteritems(self):
SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza `_ based on the value of
@@ -1076,11 +1112,13 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is encountered on read
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
#!/usr/bin/env python
@@ -1096,7 +1134,7 @@ def stream(records):
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@@ -1113,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
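The new `allow_empty_input` flag flows from `dispatch` into `SearchCommand.process`. A minimal sketch of passing it through, assuming a hypothetical `CountMatchesCommand` streaming command (not part of this add-on):

    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class CountMatchesCommand(StreamingCommand):
        def stream(self, records):
            for record in records:
                yield record

    if __name__ == '__main__':
        # With allow_empty_input=False, an empty chunk body under protocol
        # version 2 raises a ValueError instead of being processed silently.
        dispatch(CountMatchesCommand, sys.argv, sys.stdin, sys.stdout, __name__,
                 allow_empty_input=False)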
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/streaming_command.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/streaming_command.py
index cf5c0f4..fa075ed 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/streaming_command.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/streaming_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from ..six.moves import map as imap, filter as ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -172,6 +173,8 @@ def fix_up(cls, command):
raise AttributeError('No StreamingCommand.stream override')
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
@@ -185,4 +188,8 @@ def iteritems(self):
lambda name_value1: (name_value1[0], 'stateful') if name_value1[0] == 'type' else (name_value1[0], name_value1[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/validators.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/validators.py
index b5fddc7..22f0e16 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/searchcommands/validators.py
@@ -18,13 +18,13 @@
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
-from ..six.moves import StringIO
+from splunklib.six.moves import StringIO
from io import open
import csv
import os
import re
-from .. import six
-from ..six.moves import getcwd
+from splunklib import six
+from splunklib.six.moves import getcwd
class Validator(object):
@@ -81,9 +81,9 @@ class Code(Validator):
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
- sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
- consists of a single interactive statement. In the latter case, expression statements that evaluate to
- something other than :const:`None` will be printed.
+ sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
+ consists of a single interactive statement. In the latter case, expression statements that evaluate to
+ something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
@@ -95,7 +95,9 @@ def __call__(self, value):
try:
return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- raise ValueError(error.message)
+ message = str(error)
+
+ six.raise_from(ValueError(message), error)
def format(self, value):
return None if value is None else value.source
@@ -199,6 +201,48 @@ def format(self, value):
return None if value is None else six.text_type(int(value))
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
+
+
class Duration(Validator):
""" Validates duration option values.
@@ -249,10 +293,10 @@ class List(Validator):
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
- delimiter = b','
- quotechar = b'"'
+ delimiter = str(',')
+ quotechar = str('"')
doublequote = True
- lineterminator = b'\n'
+ lineterminator = str('\n')
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
@@ -386,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
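A brief, hedged sketch of how the new `Float` validator might be used on a command option; `ThresholdCommand` and the `ratio` option are illustrative names, not part of this add-on:

    from splunklib.searchcommands import Configuration, Option, StreamingCommand, validators

    @Configuration()
    class ThresholdCommand(StreamingCommand):
        # ratio must parse as a float between 0.0 and 1.0, otherwise a ValueError is raised
        ratio = Option(require=True, validate=validators.Float(minimum=0.0, maximum=1.0))

        def stream(self, records):
            for record in records:
                record['above_threshold'] = float(record.get('score', 0)) > self.ratio
                yield record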
diff --git a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/six.py b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/six.py
index 190c023..d13e50c 100644
--- a/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/six.py
+++ b/bin/ta_dmarc/aob_py3/solnlib/packages/splunklib/six.py
@@ -1,6 +1,4 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
+# Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -20,6 +18,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
+"""Utilities for writing code that runs on Python 2 and 3"""
+
from __future__ import absolute_import
import functools
@@ -29,7 +29,7 @@
import types
__author__ = "Benjamin Peterson "
-__version__ = "1.10.0"
+__version__ = "1.14.0"
# Useful for very coarse version differentiation.
@@ -241,6 +241,7 @@ class _MovedItems(_LazyModule):
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
@@ -254,18 +255,21 @@ class _MovedItems(_LazyModule):
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
@@ -337,10 +341,12 @@ class Module_six_moves_urllib_parse(_LazyModule):
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
@@ -416,6 +422,8 @@ class Module_six_moves_urllib_request(_LazyModule):
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
@@ -631,13 +639,16 @@ def u(s):
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
+ del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
@@ -659,6 +670,7 @@ def indexbytes(buf, i):
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
@@ -675,15 +687,23 @@ def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
@@ -699,19 +719,19 @@ def exec_(_code_, _globs_=None, _locs_=None):
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
""")
-if sys.version_info[:2] == (3, 2):
+if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
+ try:
+ raise value from from_value
+ finally:
+ value = None
""")
else:
def raise_from(value, from_value):
@@ -786,13 +806,33 @@ def print_(*args, **kwargs):
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same as what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
else:
wraps = functools.wraps
@@ -802,10 +842,22 @@ def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
- class metaclass(meta):
+ class metaclass(type):
def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
@@ -821,13 +873,73 @@ def wrapper(cls):
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
def python_2_unicode_compatible(klass):
"""
- A decorator that defines __unicode__ and __str__ methods under Python 2.
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
@@ -866,3 +978,16 @@ def python_2_unicode_compatible(klass):
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
+
+import warnings
+
+def deprecated(message):
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
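A small sketch of the `deprecated` decorator appended to the vendored six module, assuming it is importable as `splunklib.six` (the path used elsewhere in this diff); the decorated function name is illustrative:

    from splunklib.six import deprecated

    @deprecated("Use parse_v2_option instead.")   # hypothetical replacement name
    def parse_option(arg):
        return arg.split('=', 1)

    parse_option('owner=nobody')  # emits a DeprecationWarning mentioning parse_option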
diff --git a/bin/ta_dmarc/aob_py3/splunklib/__init__.py b/bin/ta_dmarc/aob_py3/splunklib/__init__.py
index 525dc8e..1f9fc68 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/__init__.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/__init__.py
@@ -16,5 +16,20 @@
from __future__ import absolute_import
from splunklib.six.moves import map
-__version_info__ = (1, 6, 16)
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
+
+__version_info__ = (1, 6, 20)
__version__ = ".".join(map(str, __version_info__))
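The new `setup_logging` helper simply wraps `logging.basicConfig`; a minimal usage sketch:

    import logging
    import splunklib

    # Enable debug output for the vendored SDK; format and date format fall back
    # to DEFAULT_LOG_FORMAT and DEFAULT_DATE_FORMAT unless overridden.
    splunklib.setup_logging(logging.DEBUG)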
diff --git a/bin/ta_dmarc/aob_py3/splunklib/binding.py b/bin/ta_dmarc/aob_py3/splunklib/binding.py
index c3121fb..bb2771d 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/binding.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/binding.py
@@ -31,6 +31,7 @@
import socket
import ssl
import sys
+import time
from base64 import b64encode
from contextlib import contextmanager
from datetime import datetime
@@ -39,7 +40,6 @@
from xml.etree.ElementTree import XML
from splunklib import six
-from splunklib.six import StringIO
from splunklib.six.moves import urllib
from .data import record
@@ -49,6 +49,7 @@
except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -70,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -296,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -454,6 +454,12 @@ class Context(object):
:type splunkToken: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
@@ -471,7 +477,8 @@ class Context(object):
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
- cert_file=kwargs.get("cert_file")) # Default to False for backward compat
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
@@ -500,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+ """Returns true if the ``HttpLib`` member of this instance has auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+ :return: ``True`` if an auth token is present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -618,7 +625,7 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@@ -681,7 +688,7 @@ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.get(path, all_headers, **query)
return response
@@ -759,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -795,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -826,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
+
all_headers = headers + self.additional_headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
@@ -1070,7 +1095,7 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
@@ -1137,12 +1162,14 @@ class HttpLib(object):
If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None):
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
if custom_handler is None:
- self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file)
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
else:
self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1256,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1292,7 +1328,10 @@ def __init__(self, response, connection=None):
self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
@@ -1351,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None, verify=False):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1363,6 +1402,8 @@ def handler(key_file=None, cert_file=None, timeout=None, verify=False):
:type timeout: ``integer`` or "None"
:param `verify`: Set to False to disable SSL verification on https connections.
:type verify: ``Boolean``
+ :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True is enabled and context is specified
+ :type context: ``SSLContext``
"""
def connect(scheme, host, port):
@@ -1376,6 +1417,10 @@ def connect(scheme, host, port):
if not verify:
kwargs['context'] = ssl._create_unverified_context()
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
@@ -1385,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.16",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
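A hedged sketch of the new `retries`, `retryDelay`, and `context` keyword arguments on the binding layer; host, port, and token values are placeholders:

    import ssl
    import splunklib.binding as binding

    ssl_context = ssl.create_default_context()
    ctx = binding.connect(
        host='localhost', port=8089, token='<session-token>',
        verify=True, context=ssl_context,   # custom SSLContext is honored when verify=True
        retries=3, retryDelay=5)            # retry failed requests, sleeping 5 seconds between attempts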
diff --git a/bin/ta_dmarc/aob_py3/splunklib/client.py b/bin/ta_dmarc/aob_py3/splunklib/client.py
index 39b1dcc..35d9e4f 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/client.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/client.py
@@ -75,6 +75,8 @@
namespace)
from .data import record
+logger = logging.getLogger(__name__)
+
__all__ = [
"connect",
"NotSupportedError",
@@ -224,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
@@ -295,7 +300,7 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
@@ -318,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -365,7 +377,7 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
- :param verify: Enable (True) or disable (False) SSL verrification for
+ :param verify: Enable (True) or disable (False) SSL verification for
https connections. (optional, the default is True)
:type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
@@ -384,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -401,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -673,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+ If the KVStore owner is not set, it defaults to "nobody".
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ The KVStore collection is refreshed when the owner value is changed.
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+ Sets the owner for the namespace before retrieving the KVStore collection.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
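The new `kvstore_owner` property defaults to "nobody" and is applied to the namespace before `service.kvstore` is resolved; a short sketch with placeholder credentials:

    import splunklib.client as client

    service = client.connect(host='localhost', port=8089,
                             username='admin', password='changeme')
    service.kvstore_owner = 'nobody'   # optional; 'nobody' is already the default
    collections = service.kvstore      # owner is set on the namespace before the lookup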
@@ -699,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -757,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -817,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -828,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -953,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1059,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1444,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -2086,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2517,9 +2547,9 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
@@ -2737,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2748,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2778,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2941,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3002,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3157,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3591,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3619,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3640,6 +3667,11 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
@@ -3652,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8'))
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3664,6 +3696,8 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
+ if isinstance(data, dict):
+ data = json.dumps(data)
return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
@@ -3686,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3700,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
diff --git a/bin/ta_dmarc/aob_py3/splunklib/data.py b/bin/ta_dmarc/aob_py3/splunklib/data.py
index dedbb33..f9ffb86 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/data.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/data.py
@@ -161,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
diff --git a/bin/ta_dmarc/aob_py3/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/aob_py3/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index 3e43210..5f8c5aa
--- a/bin/ta_dmarc/aob_py3/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/modularinput/event_writer.py
@@ -15,7 +15,6 @@
from __future__ import absolute_import
import sys
-from io import TextIOWrapper, TextIOBase
from splunklib.six import ensure_str
from .event import ET
@@ -83,5 +82,6 @@ def write_xml_document(self, document):
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write("</stream>")
+ if self.header_written:
+ self._out.write("</stream>")
self._out.flush()
diff --git a/bin/ta_dmarc/aob_py3/splunklib/modularinput/utils.py b/bin/ta_dmarc/aob_py3/splunklib/modularinput/utils.py
index 853694a..3d42b63 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/modularinput/utils.py
@@ -64,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
+ data[child_name] = parse_parameters(child)
return data
diff --git a/bin/ta_dmarc/aob_py3/splunklib/results.py b/bin/ta_dmarc/aob_py3/splunklib/results.py
index 20501c5..8543ab0 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/results.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -34,18 +34,19 @@
from __future__ import absolute_import
-from io import BytesIO
+from io import BufferedReader, BytesIO
from splunklib import six
+
+from splunklib.six import deprecated
+
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
from splunklib.six.moves import cStringIO as StringIO
@@ -54,9 +55,11 @@
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -67,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -80,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -92,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -110,6 +116,7 @@ def read(self, n=None):
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -123,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("")
assert s.read() == ""
"""
+
def __init__(self, stream):
self.stream = stream
@@ -153,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjuction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -180,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -260,16 +271,16 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, six.string_types) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
values.append(text)
@@ -291,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+ __next__ = next
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
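A runnable sketch of the new `JSONResultsReader` with a oneshot search, mirroring the updated docstrings; connection details are placeholders:

    import splunklib.client as client
    import splunklib.results as results

    service = client.connect(host='localhost', port=8089,
                             username='admin', password='changeme')
    stream = service.jobs.oneshot('search index=_internal | head 5', output_mode='json')
    for item in results.JSONResultsReader(stream):
        if isinstance(item, results.Message):
            print('Message:', item)      # diagnostic messages interleaved with results
        else:
            print('Result:', item)       # each result is a plain dict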
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/__init__.py
index c56c510..8a92903 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/__init__.py
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `__
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure seach assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/decorators.py
index 36590a7..d8b3f48 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/decorators.py
@@ -17,10 +17,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
from splunklib.six.moves import map as imap
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/generating_command.py
index 724d45d..6a75d2c 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/generating_command.py
@@ -15,6 +15,7 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -212,13 +213,49 @@ def _execute(self, ifile, process):
def _execute_chunk_v2(self, process, chunk):
count = 0
+ records = []
for row in process:
- self._record_writer.write_record(row)
+ records.append(row)
count += 1
if count == self._record_writer._maxresultrows:
- self._finished = False
- return
- self._finished = True
+ break
+
+ for row in records:
+ self._record_writer.write_record(row)
+
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
+
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands, it must be true. Doing otherwise will cause an error.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command being run in a search,
+ # also this class implements its own separate _execute_chunk_v2 method which does not respect allow_empty_input
+ # so ensure that allow_empty_input is always True
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
# endregion
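As a sketch of how a generating command is used under this change (the command class, option, and field names are illustrative, not part of this add-on): since ``GeneratingCommand.process`` now pins ``allow_empty_input=True``, the script only has to implement ``generate()`` and call ``dispatch`` as before::

    #!/usr/bin/env python
    import sys
    import time
    from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators

    @Configuration()
    class GenerateHelloCommand(GeneratingCommand):
        count = Option(require=True, validate=validators.Integer(minimum=1))

        def generate(self):
            # Generating commands start a search pipeline, so they receive no input records
            for i in range(1, self.count + 1):
                yield {'_time': time.time(), '_raw': 'Hello %d' % i, 'event_no': i}

    dispatch(GenerateHelloCommand, sys.argv, sys.stdin, sys.stdout, __name__)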
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/internals.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/internals.py
index 85f9e0f..1ea2833 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/internals.py
@@ -19,10 +19,7 @@
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
@@ -508,6 +505,7 @@ def __init__(self, ofile, maxresultrows=None):
self._chunk_count = 0
self._pending_record_count = 0
self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -572,6 +570,7 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
@@ -593,6 +592,7 @@ def _write_record(self, record):
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/search_command.py
index 7383a5e..dd11391 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/search_command.py
@@ -22,10 +22,7 @@
import io
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from copy import deepcopy
from splunklib.six.moves import StringIO
from itertools import chain, islice
@@ -124,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -172,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
record = Option(doc='''
**Syntax: record=
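A short sketch of the two new helpers above, assuming a streaming command (the command and field names are illustrative). ``add_field`` registers the field with the record writer's ``custom_fields`` so it appears in the output header even when the first record did not contain it::

    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class TagCommand(StreamingCommand):
        def stream(self, records):
            for record in records:
                # 'reviewed' is added to the record writer's custom_fields and emitted for every record
                self.add_field(record, 'reviewed', 'yes')
                yield record

    dispatch(TagCommand, sys.argv, sys.stdin, sys.stdout, __name__)

In a generating command, ``yield self.gen_record(_time=time.time(), status='ok')`` plays the same role for records built from scratch.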
@@ -413,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -425,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command, if False an Error will be returned if empty chunk body is encountered when read
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -634,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -704,7 +729,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
self.fieldnames.append(str(result[0]))
else:
@@ -965,13 +990,14 @@ def _execute_v2(self, ifile, process):
def _execute_chunk_v2(self, process, chunk):
metadata, body = chunk
- if len(body) <= 0:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
records = self._read_csv_records(StringIO(body))
self._record_writer.write_records(process(records))
-
def _report_unexpected_error(self):
error_type, error, tb = sys.exc_info()
@@ -1063,8 +1089,7 @@ def iteritems(self):
SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza `_ based on the value of
@@ -1087,6 +1112,8 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command, if False an Error will be returned if empty chunk body is encountered when read
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
@@ -1124,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
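A sketch of opting out of empty input at the ``dispatch`` call site (the command name is illustrative); with ``allow_empty_input=False`` an empty chunk body now raises the ``ValueError`` shown above instead of being silently skipped::

    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

    @Configuration()
    class StrictCommand(StreamingCommand):
        def stream(self, records):
            for record in records:
                yield record

    # Fail fast if the command is dispatched with no input records
    dispatch(StrictCommand, sys.argv, sys.stdin, sys.stdout, __name__, allow_empty_input=False)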
diff --git a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/validators.py b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/validators.py
index 0278fbd..22f0e16 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/searchcommands/validators.py
@@ -95,10 +95,7 @@ def __call__(self, value):
try:
return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- if six.PY2:
- message = error.message
- else:
- message = str(error)
+ message = str(error)
six.raise_from(ValueError(message), error)
@@ -204,6 +201,48 @@ def format(self, value):
return None if value is None else six.text_type(int(value))
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
+
+
class Duration(Validator):
""" Validates duration option values.
@@ -391,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
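The new ``Float`` validator plugs into ``Option`` exactly like ``Integer``; a sketch (the option and field names are illustrative)::

    import sys
    from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators

    @Configuration()
    class ScoreFilterCommand(StreamingCommand):
        threshold = Option(default=0.5, validate=validators.Float(minimum=0.0, maximum=1.0))

        def stream(self, records):
            for record in records:
                # 'score' is an assumed field name for the sake of the example
                if float(record.get('score', 0)) >= self.threshold:
                    yield record

    dispatch(ScoreFilterCommand, sys.argv, sys.stdin, sys.stdout, __name__)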
diff --git a/bin/ta_dmarc/aob_py3/splunklib/six.py b/bin/ta_dmarc/aob_py3/splunklib/six.py
index 5fe9f8e..d13e50c 100644
--- a/bin/ta_dmarc/aob_py3/splunklib/six.py
+++ b/bin/ta_dmarc/aob_py3/splunklib/six.py
@@ -978,3 +978,16 @@ def python_2_unicode_compatible(klass):
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
+
+import warnings
+
+def deprecated(message):
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
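A minimal sketch of the new ``deprecated`` helper (the function names are illustrative); calling the wrapped function issues a ``DeprecationWarning`` carrying the supplied message::

    from splunklib.six import deprecated

    @deprecated("Use new_lookup() instead.")
    def old_lookup(domain):
        return domain.lower()

    # Warns: "old_lookup is a deprecated function. Use new_lookup() instead."
    old_lookup("Example.COM")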
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/__init__.py b/bin/ta_dmarc/solnlib/packages/splunklib/__init__.py
index 92a9d0c..1f9fc68 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/__init__.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/__init__.py
@@ -14,6 +14,22 @@
"""Python library for Splunk."""
-__version_info__ = (1, 6, 0)
-__version__ = ".".join(map(str, __version_info__))
+from __future__ import absolute_import
+from splunklib.six.moves import map
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
+
+__version_info__ = (1, 6, 20)
+__version__ = ".".join(map(str, __version_info__))
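The new module-level helper configures the root logger that the named ``splunklib`` loggers propagate to; a sketch, assuming the bundled package is importable as ``splunklib``::

    import logging
    import splunklib

    # Emit splunklib debug logs (binding, client, ...) in the package's default format
    splunklib.setup_logging(logging.DEBUG)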
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/binding.py b/bin/ta_dmarc/solnlib/packages/splunklib/binding.py
index 073a78d..bb2771d 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/binding.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/binding.py
@@ -24,29 +24,32 @@
:mod:`splunklib.client` module.
"""
-import httplib
+from __future__ import absolute_import
+
+import io
import logging
import socket
import ssl
-import urllib
-import io
import sys
-import Cookie
-
+import time
from base64 import b64encode
+from contextlib import contextmanager
from datetime import datetime
from functools import wraps
-from StringIO import StringIO
+from io import BytesIO
+from xml.etree.ElementTree import XML
-from contextlib import contextmanager
+from splunklib import six
+from splunklib.six.moves import urllib
+
+from .data import record
-from xml.etree.ElementTree import XML
try:
from xml.etree.ElementTree import ParseError
-except ImportError, e:
+except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
-from .data import record
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -68,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -78,6 +81,7 @@ def _parse_cookies(cookie_str, dictionary):
then updates the the dictionary with any key-value pairs found.
**Example**::
+
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
@@ -88,7 +92,7 @@ def _parse_cookies(cookie_str, dictionary):
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
- parsed_cookie = Cookie.SimpleCookie(cookie_str)
+ parsed_cookie = six.moves.http_cookies.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value
@@ -168,12 +172,12 @@ def __new__(self, val='', skip_encode=False, encode_slash=False):
elif skip_encode:
return str.__new__(self, val)
elif encode_slash:
- return str.__new__(self, urllib.quote_plus(val))
+ return str.__new__(self, urllib.parse.quote_plus(val))
else:
# When subclassing str, just call str's __new__ method
# with your class and the value you want to have in the
# new string.
- return str.__new__(self, urllib.quote(val))
+ return str.__new__(self, urllib.parse.quote(val))
def __add__(self, other):
"""self + other
@@ -184,7 +188,7 @@ def __add__(self, other):
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__add__(self, other), skip_encode=True)
else:
- return UrlEncoded(str.__add__(self, urllib.quote(other)), skip_encode=True)
+ return UrlEncoded(str.__add__(self, urllib.parse.quote(other)), skip_encode=True)
def __radd__(self, other):
"""other + self
@@ -195,7 +199,7 @@ def __radd__(self, other):
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__radd__(self, other), skip_encode=True)
else:
- return UrlEncoded(str.__add__(urllib.quote(other), self), skip_encode=True)
+ return UrlEncoded(str.__add__(urllib.parse.quote(other), self), skip_encode=True)
def __mod__(self, fields):
"""Interpolation into ``UrlEncoded``s is disabled.
@@ -205,7 +209,7 @@ def __mod__(self, fields):
"""
raise TypeError("Cannot interpolate into a UrlEncoded object.")
def __repr__(self):
- return "UrlEncoded(%s)" % repr(urllib.unquote(str(self)))
+ return "UrlEncoded(%s)" % repr(urllib.parse.unquote(str(self)))
@contextmanager
def _handle_auth_error(msg):
@@ -293,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -429,6 +432,8 @@ class Context(object):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+    :param verify: Enable (True) or disable (False) SSL verification for https connections.
+ :type verify: ``Boolean``
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param owner: The owner context of the namespace (optional, the default is "None").
@@ -445,6 +450,16 @@ class Context(object):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param splunkToken: Splunk authentication token
+ :type splunkToken: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
+    :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
@@ -461,7 +476,9 @@ class Context(object):
c = binding.Context(cookie="splunkd_8089=...")
"""
def __init__(self, handler=None, **kwargs):
- self.http = HttpLib(handler)
+ self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
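A sketch of the new constructor options working together (host, token, and header values are placeholders): ``splunkToken`` sends a bearer token, ``verify``/``context`` control TLS verification, and ``retries``/``retryDelay`` make the underlying ``HttpLib`` retry failed connections::

    import splunklib.binding as binding

    ctx = binding.Context(host="splunk.example.com", port=8089,
                          splunkToken="eyJraWQiOi...",          # placeholder JWT
                          verify=True,
                          retries=3, retryDelay=5,
                          headers=[("X-Requested-By", "TA-dmarc")])
    response = ctx.get("/services/server/info", output_mode="json")
    print(response.status)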
@@ -473,10 +490,12 @@ def __init__(self, handler=None, **kwargs):
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.basic = kwargs.get("basic", False)
+ self.bearerToken = kwargs.get("splunkToken", "")
self.autologin = kwargs.get("autologin", False)
+ self.additional_headers = kwargs.get("headers", [])
# Store any cookies in the self.http._cookies dict
- if kwargs.has_key("cookie") and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
+ if "cookie" in kwargs and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
_parse_cookies(kwargs["cookie"], self.http._cookies)
def get_cookies(self):
@@ -488,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+        """Returns true if the ``HttpLib`` member of this instance has an auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+        :return: ``True`` if there is an auth token present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -508,9 +527,12 @@ def _auth_headers(self):
:returns: A list of 2-tuples containing key and value
"""
if self.has_cookies():
- return [("Cookie", _make_cookie_header(self.get_cookies().items()))]
+ return [("Cookie", _make_cookie_header(list(self.get_cookies().items())))]
elif self.basic and (self.username and self.password):
- token = 'Basic %s' % b64encode("%s:%s" % (self.username, self.password))
+ token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii')
+ return [("Authorization", token)]
+ elif self.bearerToken:
+ token = 'Bearer %s' % self.bearerToken
return [("Authorization", token)]
elif self.token is _NoAuthenticationToken:
return []
@@ -603,13 +625,13 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
- def get(self, path_segment, owner=None, app=None, sharing=None, **query):
+ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
@@ -632,6 +654,8 @@ def get(self, path_segment, owner=None, app=None, sharing=None, **query):
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
@@ -659,10 +683,14 @@ def get(self, path_segment, owner=None, app=None, sharing=None, **query):
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
+ if headers is None:
+ headers = []
+
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
- response = self.http.get(path, self._auth_headers, **query)
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
+ all_headers = headers + self.additional_headers + self._auth_headers
+ response = self.http.get(path, all_headers, **query)
return response
@_authentication
@@ -703,7 +731,12 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
- :type query: ``string``
+ :param body: Parameters to be used in the post body. If specified,
+ any parameters in the query will be applied to the URL instead of
+ the body. If a dict is supplied, the key-value pairs will be form
+ encoded. If a string is supplied, the body will be passed through
+ in the request unchanged.
+ :type body: ``dict`` or ``str``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -733,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
- all_headers = headers + self._auth_headers
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
+ all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -769,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -800,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- all_headers = headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+
+ all_headers = headers + self.additional_headers + self._auth_headers
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
@@ -848,12 +899,17 @@ def login(self):
# as credentials were passed in.
return
+ if self.bearerToken:
+ # Bearer auth mode requested, so this method is a nop as long
+ # as authentication token was passed in.
+ return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
self.authority + self._abspath("/services/auth/login"),
username=self.username,
password=self.password,
+ headers=self.additional_headers,
cookie="1") # In Splunk 6.2+, passing "cookie=1" will return the "set-cookie" header
body = response.body.read()
@@ -964,6 +1020,8 @@ def connect(**kwargs):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
:param autologin: When ``True``, automatically tries to log in again if the
session terminates.
:type autologin: ``Boolean``
@@ -1011,7 +1069,7 @@ class AuthenticationError(HTTPError):
def __init__(self, message, cause):
# Put the body back in the response so that HTTPError's constructor can
# read it again.
- cause._response.body = StringIO(cause.body)
+ cause._response.body = BytesIO(cause.body)
HTTPError.__init__(self, cause._response, message)
@@ -1037,27 +1095,28 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
items = []
- for key, value in kwargs.iteritems():
+ for key, value in six.iteritems(kwargs):
if isinstance(value, list):
items.extend([(key, item) for item in value])
else:
items.append((key, value))
- return urllib.urlencode(items)
+ return urllib.parse.urlencode(items)
# Crack the given url into (scheme, host, port, path)
def _spliturl(url):
- scheme, opaque = urllib.splittype(url)
- netloc, path = urllib.splithost(opaque)
- host, port = urllib.splitport(netloc)
+ parsed_url = urllib.parse.urlparse(url)
+ host = parsed_url.hostname
+ port = parsed_url.port
+ path = '?'.join((parsed_url.path, parsed_url.query)) if parsed_url.query else parsed_url.path
# Strip brackets if its an IPv6 address
if host.startswith('[') and host.endswith(']'): host = host[1:-1]
if port is None: port = DEFAULT_PORT
- return scheme, host, port, path
+ return parsed_url.scheme, host, port, path
# Given an HTTP request handler, this wrapper objects provides a related
# family of convenience methods built using that handler.
@@ -1100,10 +1159,17 @@ class HttpLib(object):
The response dictionary is returned directly by ``HttpLib``'s methods with
no further processing. By default, ``HttpLib`` calls the :func:`handler` function
to get a handler function.
+
+ If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None):
- self.handler = handler() if custom_handler is None else custom_handler
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
+ if custom_handler is None:
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
+ else:
+ self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1183,16 +1249,18 @@ def post(self, url, headers=None, **kwargs):
# to support the receivers/stream endpoint.
if 'body' in kwargs:
# We only use application/x-www-form-urlencoded if there is no other
- # Content-Type header present. This can happen in cases where we
+ # Content-Type header present. This can happen in cases where we
# send requests as application/json, e.g. for KV Store.
- if len(filter(lambda x: x[0].lower() == "content-type", headers)) == 0:
+ if len([x for x in headers if x[0].lower() == "content-type"]) == 0:
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
body = kwargs.pop('body')
+ if isinstance(body, dict):
+ body = _encode(**body).encode('utf-8')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
- body = _encode(**kwargs)
+ body = _encode(**kwargs).encode('utf-8')
message = {
'method': "POST",
'headers': headers,
@@ -1215,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1226,7 +1303,7 @@ def request(self, url, message, **kwargs):
# If response.headers is a dict, get the key-value pairs as 2-tuples
# this is the case when using urllib2
if isinstance(response.headers, dict):
- key_value_tuples = response.headers.items()
+ key_value_tuples = list(response.headers.items())
for key, value in key_value_tuples:
if key.lower() == "set-cookie":
_parse_cookies(value, self._cookies)
@@ -1248,15 +1325,18 @@ class ResponseReader(io.RawIOBase):
def __init__(self, response, connection=None):
self._response = response
self._connection = connection
- self._buffer = ''
+ self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
"""Indicates whether there is any more data in the response."""
- return self.peek(1) == ""
+ return self.peek(1) == b""
def peek(self, size):
"""Nondestructively retrieves a given number of characters.
@@ -1273,8 +1353,8 @@ def peek(self, size):
def close(self):
"""Closes this response."""
- if _connection:
- _connection.close()
+ if self._connection:
+ self._connection.close()
self._response.close()
def read(self, size = None):
@@ -1286,7 +1366,7 @@ def read(self, size = None):
"""
r = self._buffer
- self._buffer = ''
+ self._buffer = b''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
@@ -1310,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1320,21 +1400,28 @@ def handler(key_file=None, cert_file=None, timeout=None):
:type cert_file: ``string``
:param `timeout`: The request time-out period, in seconds (optional).
:type timeout: ``integer`` or "None"
+ :param `verify`: Set to False to disable SSL verification on https connections.
+ :type verify: ``Boolean``
+    :param `context`: The SSLContext that can be used with the HTTPSConnection when verify=True is enabled and a context is specified
+    :type context: ``SSLContext``
"""
def connect(scheme, host, port):
kwargs = {}
if timeout is not None: kwargs['timeout'] = timeout
if scheme == "http":
- return httplib.HTTPConnection(host, port, **kwargs)
+ return six.moves.http_client.HTTPConnection(host, port, **kwargs)
if scheme == "https":
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
- # If running Python 2.7.9+, disable SSL certificate validation
- if sys.version_info >= (2,7,9) and key_file is None and cert_file is None:
+ if not verify:
kwargs['context'] = ssl._create_unverified_context()
- return httplib.HTTPSConnection(host, port, **kwargs)
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
+ return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
def request(url, message, **kwargs):
@@ -1343,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.0",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
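With the reworked handler, certificate checking is opt-in: ``verify=False`` (the default) keeps the old unverified behaviour, while ``verify=True`` plus an ``SSLContext`` pins verification to a chosen CA bundle. A sketch via ``client.connect`` (host, credentials, and the CA path are placeholders)::

    import ssl
    import splunklib.client as client

    # Verify the splunkd certificate against a private CA bundle
    ssl_context = ssl.create_default_context(cafile="/opt/splunk/etc/auth/cacert.pem")
    service = client.connect(host="splunk.example.com", port=8089,
                             username="admin", password="changeme",
                             verify=True, context=ssl_context)
    print(service.info["version"])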
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/client.py b/bin/ta_dmarc/solnlib/packages/splunklib/client.py
index 982fd41..35d9e4f 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/client.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/client.py
@@ -58,18 +58,24 @@
my_app.package() # Creates a compressed package of this application
"""
+import contextlib
import datetime
import json
-import urllib
import logging
-from time import sleep
-from datetime import datetime, timedelta
import socket
-import contextlib
+from datetime import datetime, timedelta
+from time import sleep
+
+from splunklib import six
+from splunklib.six.moves import urllib
-from .binding import Context, HTTPError, AuthenticationError, namespace, UrlEncoded, _encode, _make_cookie_header
-from .data import record
from . import data
+from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded,
+ _encode, _make_cookie_header, _NoAuthenticationToken,
+ namespace)
+from .data import record
+
+logger = logging.getLogger(__name__)
__all__ = [
"connect",
@@ -100,8 +106,8 @@
PATH_SAVED_SEARCHES = "saved/searches/"
PATH_STANZA = "configs/conf-%s/%s" # (file, stanza)
PATH_USERS = "authentication/users/"
-PATH_RECEIVERS_STREAM = "receivers/stream"
-PATH_RECEIVERS_SIMPLE = "receivers/simple"
+PATH_RECEIVERS_STREAM = "/services/receivers/stream"
+PATH_RECEIVERS_SIMPLE = "/services/receivers/simple"
PATH_STORAGE_PASSWORDS = "storage/passwords"
XNAMEF_ATOM = "{http://www.w3.org/2005/Atom}%s"
@@ -182,7 +188,7 @@ def _trailing(template, *targets):
def _filter_content(content, *args):
if len(args) > 0:
return record((k, content[k]) for k in args)
- return record((k, v) for k, v in content.iteritems()
+ return record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes', 'type'])
# Construct a resource path from the given base path + resource name
@@ -192,8 +198,11 @@ def _path(base, name):
# Load an atom record from the body of the given response
+# this will ultimately be sent to an xml ElementTree so we
+# should use the xmlcharrefreplace option
def _load_atom(response, match=None):
- return data.load(response.body.read(), match)
+ return data.load(response.body.read()
+ .decode('utf-8', 'xmlcharrefreplace'), match)
# Load an array of atom entries from the body of the given response
@@ -217,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
@@ -236,7 +248,7 @@ def _parse_atom_entry(entry):
metadata = _parse_atom_metadata(content)
# Filter some of the noise out of the content record
- content = record((k, v) for k, v in content.iteritems()
+ content = record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes'])
if 'type' in content:
@@ -288,6 +300,9 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+ :param verify: Enable (True) or disable (False) SSL verification for
+ https connections. (optional, the default is True)
+ :type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
:type owner: ``string``
:param `app`: The app context of the namespace (optional).
@@ -308,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+    :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -355,6 +377,9 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+ :param verify: Enable (True) or disable (False) SSL verification for
+ https connections. (optional, the default is True)
+ :type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
:type owner: ``string``
:param `app`: The app context of the namespace (optional; use "-" for wildcard).
@@ -371,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+    :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -388,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -450,6 +481,13 @@ def info(self):
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
+ def input(self, path, kind=None):
+ """Retrieves an input by path, and optionally kind.
+
+ :return: A :class:`Input` object.
+ """
+ return Input(self, path, kind=kind).refresh()
+
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
@@ -550,7 +588,7 @@ def restart(self, timeout=None):
# This message will be deleted once the server actually restarts.
self.messages.create(name="restart_required", **msg)
result = self.post("/services/server/control/restart")
- if timeout is None:
+ if timeout is None:
return result
start = datetime.now()
diff = timedelta(seconds=timeout)
@@ -559,9 +597,9 @@ def restart(self, timeout=None):
self.login()
if not self.restart_required:
return result
- except Exception, e:
+ except Exception as e:
sleep(1)
- raise Exception, "Operation time out."
+ raise Exception("Operation time out.")
@property
def restart_required(self):
@@ -653,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+        By default, if the kvstore owner is not set, it will return "nobody".
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ kvstore is refreshed, when the owner value is changed
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+        Sets the owner for the namespace before retrieving the KVStore Collections.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
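A sketch of the owner-aware KV Store access (connection details and app name are placeholders); setting ``kvstore_owner`` switches the namespace before the collections are fetched::

    import splunklib.client as client

    service = client.connect(host="localhost", port=8089,
                             username="admin", password="changeme",
                             app="TA-dmarc")

    service.kvstore_owner = "nobody"       # the default when left unset
    for collection in service.kvstore:
        print(collection.name)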
@@ -679,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -737,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -797,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -808,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -884,7 +937,7 @@ def __contains__(self, item):
try:
self[item]
return True
- except KeyError:
+ except (KeyError, AttributeError):
return False
def __eq__(self, other):
@@ -933,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1039,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1081,7 +1135,7 @@ def read(self, response):
# text to be dispatched via HTTP. However, these links are already
# URL encoded when they arrive, and we need to mark them as such.
unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
- for k,v in results['links'].iteritems()])
+ for k,v in six.iteritems(results['links'])])
results['links'] = unquoted_links
return results
@@ -1187,7 +1241,7 @@ def __getitem__(self, key):
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
- *Example*::
+ **Example**::
s = client.connect(...)
saved_searches = s.saved_searches
@@ -1290,7 +1344,7 @@ def _entity_path(self, state):
# This has been factored out so that it can be easily
# overloaded by Configurations, which has to switch its
# entities' endpoints from its own properties/ to configs/.
- raw_path = urllib.unquote(state.links.alternate)
+ raw_path = urllib.parse.unquote(state.links.alternate)
if 'servicesNS/' in raw_path:
return _trailing(raw_path, 'servicesNS/', '/', '/')
elif 'services/' in raw_path:
@@ -1424,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -1534,7 +1588,7 @@ def create(self, name, **params):
applications = s.apps
new_app = applications.create("my_fake_app")
"""
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise InvalidNameException("%s is not a valid name for an entity." % name)
if 'namespace' in params:
namespace = params.pop('namespace')
@@ -1623,9 +1677,9 @@ def get(self, name="", owner=None, app=None, sharing=None, **query):
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
- Example:
-
- import splunklib.client
+ **Example**::
+
+ import splunklib.client
s = client.service(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
@@ -1678,7 +1732,7 @@ def __getitem__(self, key):
# The superclass implementation is designed for collections that contain
# entities. This collection (Configurations) contains collections
# (ConfigurationFile).
- #
+ #
# The configurations endpoint returns multiple entities when we ask for a single file.
# This screws up the default implementation of __getitem__ from Collection, which thinks
# that multiple entities means a name collision, so we have to override it here.
@@ -1717,7 +1771,7 @@ def create(self, name):
# This has to be overridden to handle the plumbing of creating
# a ConfigurationFile (which is a Collection) instead of some
# Entity.
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise ValueError("Invalid name: %s" % repr(name))
response = self.post(__conf=name)
if response.status == 303:
@@ -1742,9 +1796,9 @@ class Stanza(Entity):
"""This class contains a single configuration stanza."""
def submit(self, stanza):
- """Adds keys to the current configuration stanza as a
+ """Adds keys to the current configuration stanza as a
dictionary of key-value pairs.
-
+
:param stanza: A dictionary of key-value pairs for the stanza.
:type stanza: ``dict``
:return: The :class:`Stanza` object.
@@ -1811,7 +1865,7 @@ def create(self, password, username, realm=None):
:return: The :class:`StoragePassword` object created.
"""
- if not isinstance(username, basestring):
+ if not isinstance(username, six.string_types):
raise ValueError("Invalid name: %s" % repr(username))
if realm is None:
@@ -1852,7 +1906,7 @@ def delete(self, username, realm=None):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
@@ -1935,9 +1989,11 @@ def attach(self, host=None, source=None, sourcetype=None):
if host is not None: args['host'] = host
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.urlencode(args), skip_encode=True)
+ path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.parse.urlencode(args), skip_encode=True)
- cookie_or_auth_header = "Authorization: %s\r\n" % self.service.token
+ cookie_or_auth_header = "Authorization: Splunk %s\r\n" % \
+ (self.service.token if self.service.token is _NoAuthenticationToken
+ else self.service.token.replace("Splunk ", ""))
# If we have cookie(s), use them instead of "Authorization: ..."
if self.service.has_cookies():
@@ -1947,13 +2003,13 @@ def attach(self, host=None, source=None, sourcetype=None):
# the connection open and use the Splunk extension headers to note
# the input mode
sock = self.service.connect()
- headers = ["POST %s HTTP/1.1\r\n" % self.service._abspath(path),
- "Host: %s:%s\r\n" % (self.service.host, int(self.service.port)),
- "Accept-Encoding: identity\r\n",
- cookie_or_auth_header,
- "X-Splunk-Input-Mode: Streaming\r\n",
- "\r\n"]
-
+ headers = [("POST %s HTTP/1.1\r\n" % str(self.service._abspath(path))).encode('utf-8'),
+ ("Host: %s:%s\r\n" % (self.service.host, int(self.service.port))).encode('utf-8'),
+ b"Accept-Encoding: identity\r\n",
+ cookie_or_auth_header.encode('utf-8'),
+ b"X-Splunk-Input-Mode: Streaming\r\n",
+ b"\r\n"]
+
for h in headers:
sock.write(h)
return sock
@@ -2026,8 +2082,7 @@ def clean(self, timeout=60):
self.refresh()
if self.content.totalEventCount != '0':
- raise OperationError, "Cleaning index %s took longer than %s seconds; timing out." %\
- (self.name, timeout)
+ raise OperationError("Cleaning index %s took longer than %s seconds; timing out." % (self.name, timeout))
finally:
# Restore original values
self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp)
@@ -2065,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2295,7 +2346,7 @@ def create(self, name, kind, **kwargs):
path = _path(
self.path + kindpath,
'%s:%s' % (kwargs['restrictToHost'], name) \
- if kwargs.has_key('restrictToHost') else name
+ if 'restrictToHost' in kwargs else name
)
return Input(self.service, path, kind)
@@ -2430,15 +2481,12 @@ def kindpath(self, kind):
:return: The relative endpoint path.
:rtype: ``string``
"""
- if kind in self.kinds:
- return UrlEncoded(kind, skip_encode=True)
- # Special cases
- elif kind == 'tcp':
+ if kind == 'tcp':
return UrlEncoded('tcp/raw', skip_encode=True)
elif kind == 'splunktcp':
return UrlEncoded('tcp/cooked', skip_encode=True)
else:
- raise ValueError("No such kind on server: %s" % kind)
+ return UrlEncoded(kind, skip_encode=True)
def list(self, *kinds, **kwargs):
"""Returns a list of inputs that are in the :class:`Inputs` collection.
@@ -2499,13 +2547,13 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
- except HTTPError, he:
+ except HTTPError as he:
if he.status == 404: # No inputs of this kind
return []
entities = []
@@ -2517,7 +2565,7 @@ def list(self, *kinds, **kwargs):
# Unquote the URL, since all URL encoded in the SDK
# should be of type UrlEncoded, and all str should not
# be URL encoded.
- path = urllib.unquote(state.links.alternate)
+ path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
return entities
@@ -2543,7 +2591,7 @@ def list(self, *kinds, **kwargs):
# Unquote the URL, since all URL encoded in the SDK
# should be of type UrlEncoded, and all str should not
# be URL encoded.
- path = urllib.unquote(state.links.alternate)
+ path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
if 'offset' in kwargs:
@@ -2719,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2730,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2760,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader`along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2923,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2984,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3139,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3360,7 +3405,7 @@ def create(self, username, password, roles, **params):
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
"""
- if not isinstance(username, basestring):
+ if not isinstance(username, six.string_types):
raise ValueError("Invalid username: %s" % str(username))
username = username.lower()
self.post(name=username, password=password, roles=roles, **params)
@@ -3371,7 +3416,7 @@ def create(self, username, password, roles, **params):
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
- urllib.unquote(state.links.alternate),
+ urllib.parse.unquote(state.links.alternate),
state=state)
return entity
@@ -3483,7 +3528,7 @@ def create(self, name, **params):
roles = c.roles
paltry = roles.create("paltry", imported_roles="user", defaultApp="search")
"""
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise ValueError("Invalid role name: %s" % str(name))
name = name.lower()
self.post(name=name, **params)
@@ -3494,7 +3539,7 @@ def create(self, name, **params):
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
- urllib.unquote(state.links.alternate),
+ urllib.parse.unquote(state.links.alternate),
state=state)
return entity
@@ -3545,11 +3590,11 @@ def create(self, name, indexes = {}, fields = {}, **kwargs):
:return: Result of POST request
"""
- for k, v in indexes.iteritems():
+ for k, v in six.iteritems(indexes):
if isinstance(v, dict):
v = json.dumps(v)
kwargs['index.' + k] = v
- for k, v in fields.iteritems():
+ for k, v in six.iteritems(fields):
kwargs['field.' + k] = v
return self.post(name=name, **kwargs)
@@ -3558,7 +3603,7 @@ class KVStoreCollection(Entity):
def data(self):
"""Returns data object for this Collection.
- :rtype: :class:`KVStoreData`
+ :rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
@@ -3573,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3601,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3622,7 +3667,12 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
- return json.loads(self._get('', **query).body.read())
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
+ return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
"""
@@ -3634,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read())
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3646,7 +3696,9 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
- return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
"""
@@ -3668,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3682,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
@@ -3690,16 +3744,16 @@ def batch_find(self, *dbqueries):
:param dbqueries: Array of individual queries as dictionaries
:type dbqueries: ``array`` of ``dict``
-
+
:return: Results of each query
:rtype: ``array`` of ``array``
"""
- if len(dbqueries) < 1:
+ if len(dbqueries) < 1:
raise Exception('Must have at least one query.')
-
+
data = json.dumps(dbqueries)
- return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_save(self, *documents):
"""
@@ -3707,13 +3761,13 @@ def batch_save(self, *documents):
:param documents: Array of documents to save as dictionaries
:type documents: ``array`` of ``dict``
-
+
:return: Results of update operation as overall stats
:rtype: ``dict``
"""
- if len(documents) < 1:
+ if len(documents) < 1:
raise Exception('Must have at least one document.')
-
+
data = json.dumps(documents)
- return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
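The KV Store hunks above make KVStoreCollectionData JSON-encode dict payloads before POSTing and decode response bodies as UTF-8 before json.loads. A minimal usage sketch under those changes; the collection name, host, and credentials are placeholders, not part of this add-on:

import splunklib.client as client

service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")
data = service.kvstore["example_collection"].data

# insert() and update() now accept plain dicts and serialize them internally;
# the KV Store returns the generated key under "_key"
key = data.insert({"name": "boris", "count": 1})["_key"]
data.update(key, {"name": "boris", "count": 2})

# dict-valued parameters such as "query" are JSON-encoded before being sent
matches = data.query(query={"name": "boris"})

data.delete_by_id(key)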
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/data.py b/bin/ta_dmarc/solnlib/packages/splunklib/data.py
index 61431d9..f9ffb86 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/data.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/data.py
@@ -16,7 +16,10 @@
format, which is the format used by most of the REST API.
"""
+from __future__ import absolute_import
+import sys
from xml.etree.ElementTree import XML
+from splunklib import six
__all__ = ["load"]
@@ -74,6 +77,11 @@ def load(text, match=None):
'namespaces': [],
'names': {}
}
+
+ # Convert to unicode encoding in only python 2 for xml parser
+ if(sys.version_info < (3, 0, 0) and isinstance(text, unicode)):
+ text = text.encode('utf-8')
+
root = XML(text)
items = [root] if match is None else root.findall(match)
count = len(items)
@@ -88,7 +96,7 @@ def load(text, match=None):
def load_attrs(element):
if not hasattrs(element): return None
attrs = record()
- for key, value in element.attrib.iteritems():
+ for key, value in six.iteritems(element.attrib):
attrs[key] = value
return attrs
@@ -110,12 +118,12 @@ def load_elem(element, nametable=None):
if attrs is None: return name, value
if value is None: return name, attrs
# If value is simple, merge into attrs dict using special key
- if isinstance(value, str):
+ if isinstance(value, six.string_types):
attrs["$text"] = value
return name, attrs
# Both attrs & value are complex, so merge the two dicts, resolving collisions.
collision_keys = []
- for key, val in attrs.iteritems():
+ for key, val in six.iteritems(attrs):
if key in value and key in collision_keys:
value[key].append(val)
elif key in value and key not in collision_keys:
@@ -153,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
@@ -169,7 +177,7 @@ def load_value(element, nametable=None):
for child in children:
name, item = load_elem(child, nametable)
# If we have seen this name before, promote the value to a list
- if value.has_key(name):
+ if name in value:
current = value[name]
if not isinstance(current, list):
value[name] = [current]
@@ -227,7 +235,7 @@ def __getitem__(self, key):
return dict.__getitem__(self, key)
key += self.sep
result = record()
- for k,v in self.iteritems():
+ for k,v in six.iteritems(self):
if not k.startswith(key):
continue
suffix = k[len(key):]
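The data.py changes above keep load() working on both Python 2 and 3 (unicode re-encoding, six.iteritems, "in" membership tests) without altering its output shape. A small sketch of what load() returns for a trivial XML fragment; the element names are illustrative:

from splunklib import data

record = data.load('<server><host>example.local</host><port>8089</port></server>')
# load() returns a nested Record with attribute-style access;
# leaf text comes back as strings.
assert record.server.host == 'example.local'
assert record.server.port == '8089'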
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/__init__.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/__init__.py
index a4dccb1..ace954a 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/__init__.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/__init__.py
@@ -9,4 +9,4 @@
from .input_definition import InputDefinition
from .scheme import Scheme
from .script import Script
-from .validation_definition import ValidationDefinition
\ No newline at end of file
+from .validation_definition import ValidationDefinition
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/argument.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/argument.py
index fed7bed..04214d1 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/argument.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/argument.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.ElementTree as ET
except ImportError:
@@ -53,9 +54,9 @@ def __init__(self, name, description=None, validation=None,
:param name: ``string``, identifier for this argument in Splunk.
:param description: ``string``, human-readable description of the argument.
:param validation: ``string`` specifying how the argument should be validated, if using internal validation.
- If using external validation, this will be ignored.
+ If using external validation, this will be ignored.
:param data_type: ``string``, data type of this field; use the class constants.
- "data_type_boolean", "data_type_number", or "data_type_string".
+ "data_type_boolean", "data_type_number", or "data_type_string".
:param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind.
:param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind.
:param title: ``String``, a human-readable title for the argument.
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event.py
index de1d4f1..9cd6cf3 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event.py
@@ -12,6 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
+from io import TextIOBase
+from splunklib.six import ensure_text
+
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -103,5 +107,8 @@ def write_to(self, stream):
if self.done:
ET.SubElement(event, "done")
- stream.write(ET.tostring(event))
+ if isinstance(stream, TextIOBase):
+ stream.write(ensure_text(ET.tostring(event)))
+ else:
+ stream.write(ET.tostring(event))
stream.flush()
\ No newline at end of file
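Event.write_to() now distinguishes text streams from byte streams, so the same event can be written either to a TextIOBase object (for example sys.stdout on Python 3) or to a binary buffer. A minimal sketch with placeholder stanza and data values:

import io
from splunklib.modularinput.event import Event

event = Event(data="hello from a modular input",
              stanza="my_input://example",
              sourcetype="example:event")

binary_out = io.BytesIO()
event.write_to(binary_out)   # bytes path: ET.tostring() output written as-is

text_out = io.StringIO()     # a TextIOBase, like sys.stdout on Python 3
event.write_to(text_out)     # text path: output passed through ensure_text()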
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index 418405f..5f8c5aa
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/event_writer.py
@@ -12,18 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
import sys
+from splunklib.six import ensure_str
from .event import ET
try:
- from cStringIO import StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except ImportError:
- from StringIO import StringIO
+ from splunklib.six import StringIO
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
-
Its two important methods are ``writeEvent``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
@@ -76,9 +77,11 @@ def write_xml_document(self, document):
:param document: An ``ElementTree`` object.
"""
- self._out.write(ET.tostring(document))
+ self._out.write(ensure_str(ET.tostring(document)))
self._out.flush()
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write("")
\ No newline at end of file
+ if self.header_written:
+ self._out.write("")
+ self._out.flush()
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/input_definition.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/input_definition.py
index 3a2e1fa..fdc7cbb 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/input_definition.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/input_definition.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/scheme.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/scheme.py
index c3aa812..4104e4a 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/scheme.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/scheme.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError:
@@ -54,7 +55,7 @@ def add_argument(self, arg):
def to_xml(self):
"""Creates an ``ET.Element`` representing self, then returns it.
- :returns root, an ``ET.Element`` representing this scheme.
+ :returns: an ``ET.Element`` representing this scheme.
"""
root = ET.Element("scheme")
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/script.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/script.py
index dddca8a..8595dc4 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/script.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/script.py
@@ -12,14 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
-from urlparse import urlsplit
+from splunklib.six.moves.urllib.parse import urlsplit
import sys
from ..client import Service
from .event_writer import EventWriter
from .input_definition import InputDefinition
from .validation_definition import ValidationDefinition
+from splunklib import six
try:
import xml.etree.cElementTree as ET
@@ -27,7 +29,7 @@
import xml.etree.ElementTree as ET
-class Script(object):
+class Script(six.with_metaclass(ABCMeta, object)):
"""An abstract base class for implementing modular inputs.
Subclasses should override ``get_scheme``, ``stream_events``,
@@ -37,7 +39,6 @@ class Script(object):
The ``run`` function is used to run modular inputs; it typically should
not be overridden.
"""
- __metaclass__ = ABCMeta
def __init__(self):
self._input_definition = None
@@ -101,10 +102,10 @@ def run_script(self, args, event_writer, input_stream):
err_string = "ERROR Invalid arguments to modular input script:" + ' '.join(
args)
event_writer._err.write(err_string)
+ return 1
except Exception as e:
- err_string = EventWriter.ERROR + str(e.message)
- event_writer._err.write(err_string)
+ event_writer.log(EventWriter.ERROR, str(e))
return 1
@property
@@ -116,9 +117,9 @@ def service(self):
available as soon as the :code:`Script.stream_events` method is
called.
- :return: :class:splunklib.client.Service. A value of None is returned,
- if you call this method before the :code:`Script.stream_events` method
- is called.
+ :return: :class:`splunklib.client.Service`. A value of None is returned,
+ if you call this method before the :code:`Script.stream_events` method
+ is called.
"""
if self._service is not None:
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/utils.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/utils.py
index f9de82f..3d42b63 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/utils.py
@@ -14,6 +14,8 @@
# File for utility functions
+from __future__ import absolute_import
+from splunklib.six.moves import zip
def xml_compare(expected, found):
"""Checks equality of two ``ElementTree`` objects.
@@ -62,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
- return data
\ No newline at end of file
+ data[child_name] = parse_parameters(child)
+ return data
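parse_xml_data() now records the originating app of each stanza under the "__app" key. A sketch of the resulting structure for a small input-definition fragment; the stanza and parameter names are illustrative:

import xml.etree.ElementTree as ET
from splunklib.modularinput.utils import parse_xml_data

xml = """
<configuration>
  <stanza name="my_input://example" app="TA-dmarc">
    <param name="interval">300</param>
  </stanza>
</configuration>
"""
root = ET.fromstring(xml)
print(parse_xml_data(root, "stanza"))
# {'my_input://example': {'__app': 'TA-dmarc', 'interval': '300'}}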
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/validation_definition.py b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/validation_definition.py
index 72f8e7b..3bbe976 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/validation_definition.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/modularinput/validation_definition.py
@@ -13,6 +13,7 @@
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -27,7 +28,7 @@ class ValidationDefinition(object):
**Example**::
- ``v = ValidationDefinition()``
+ v = ValidationDefinition()
"""
def __init__(self):
@@ -45,23 +46,25 @@ def parse(stream):
The XML typically will look like this:
- ``<items>``
- ``   <server_host>myHost</server_host>``
- ``     <server_uri>https://127.0.0.1:8089</server_uri>``
- ``     <session_key>123102983109283019283</session_key>``
- ``     <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>``
- ``     <item name="myScheme">``
- ``       <param name="param1">value1</param>``
- ``       <param_list name="param2">``
- ``         <value>value2</value>``
- ``         <value>value3</value>``
- ``         <value>value4</value>``
- ``       </param_list>``
- ``     </item>``
- ``</items>``
+ .. code-block:: xml
+
+ <items>
+ <server_host>myHost</server_host>
+ <server_uri>https://127.0.0.1:8089</server_uri>
+ <session_key>123102983109283019283</session_key>
+ <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
+ <item name="myScheme">
+ <param name="param1">value1</param>
+ <param_list name="param2">
+ <value>value2</value>
+ <value>value3</value>
+ <value>value4</value>
+ </param_list>
+ </item>
+ </items>
:param stream: ``Stream`` containing XML to parse.
- :return definition: A ``ValidationDefinition`` object.
+ :return: A ``ValidationDefinition`` object.
"""
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/results.py b/bin/ta_dmarc/solnlib/packages/splunklib/results.py
index ffc9b0b..8543ab0 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/results.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -32,26 +32,34 @@
print "Results are a preview: %s" % reader.is_preview
"""
+from __future__ import absolute_import
+
+from io import BufferedReader, BytesIO
+
+from splunklib import six
+
+from splunklib.six import deprecated
+
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
- from cStringIO import StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except:
- from StringIO import StringIO
+ from splunklib.six import StringIO
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -62,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -75,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -87,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -95,16 +106,17 @@ def read(self, n=None):
If *n* is ``None``, return all available characters.
"""
- response = ""
+ response = b""
while len(self.streams) > 0 and (n is None or n > 0):
txt = self.streams[0].read(n)
response += txt
if n is not None:
n -= len(txt)
- if n > 0 or n is None:
+ if n is None or n > 0:
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -118,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("<?xml abcd><element><?xml ...></element>")
assert s.read() == "<element></element>"
"""
+
def __init__(self, stream):
self.stream = stream
@@ -126,17 +139,17 @@ def read(self, n=None):
If *n* is ``None``, return all available characters.
"""
- response = ""
+ response = b""
while n is None or n > 0:
c = self.stream.read(1)
- if c == "":
+ if c == b"":
break
- elif c == "<":
+ elif c == b"<":
c += self.stream.read(1)
- if c == "":
+ if c == b"":
while True:
q = self.stream.read(1)
- if q == ">":
+ if q == b">":
break
else:
response += c
@@ -148,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjuction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -175,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -194,7 +210,7 @@ def __init__(self, stream):
# we remove all the DTD definitions inline, then wrap the
# fragments in a fiction element to make the parser happy.
stream = _XMLDTDFilter(stream)
- stream = _ConcatenatedStream(StringIO("<doc>"), stream, StringIO("</doc>"))
+ stream = _ConcatenatedStream(BytesIO(b"<doc>"), stream, BytesIO(b"</doc>"))
self.is_preview = None
self._gen = self._parse_results(stream)
@@ -202,7 +218,9 @@ def __iter__(self):
return self
def next(self):
- return self._gen.next()
+ return next(self._gen)
+
+ __next__ = next
def _parse_results(self, stream):
"""Parse results and messages out of *stream*."""
@@ -233,7 +251,7 @@ def _parse_results(self, stream):
if event == 'start':
values = []
elif event == 'end':
- field_name = elem.attrib['k'].encode('utf8')
+ field_name = elem.attrib['k']
if len(values) == 1:
result[field_name] = values[0]
else:
@@ -253,19 +271,19 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, basestring) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
- values.append(text.encode('utf8'))
+ values.append(text)
elem.clear()
elif elem.tag == 'msg':
@@ -273,7 +291,7 @@ def __itertext(self):
msg_type = elem.attrib['type']
elif event == 'end':
text = elem.text if elem.text is not None else ""
- yield Message(msg_type, text.encode('utf8'))
+ yield Message(msg_type, text)
elem.clear()
except SyntaxError as pe:
# This is here to handle the same incorrect return from
@@ -284,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+
+ __next__ = next
+
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
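Putting the new reader together with the client.py changes earlier in this diff, a typical read loop looks like the sketch below; host and credentials are placeholders:

import splunklib.client as client
import splunklib.results as results

service = client.connect(host="localhost", port=8089,
                         username="admin", password="changeme")

stream = service.jobs.oneshot("search index=_internal | head 5",
                              output_mode="json")
for item in results.JSONResultsReader(stream):
    if isinstance(item, results.Message):
        print("%s: %s" % (item.type, item.message))   # diagnostic message
    elif isinstance(item, dict):
        print(item)                                    # one result row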
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/__init__.py
index 12b14f3..8a92903 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/__init__.py
@@ -30,7 +30,7 @@
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
It does not show that :code:`field-name` values may be comma-separated. This is because Splunk strips commas from
- the command line. A search command will never see them.
+ the command line. A search command will never see them.
2. Search commands targeting versions of Splunk prior to 6.3 must be statically configured as follows:
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `_
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure search assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/decorators.py
index 1a0400f..d8b3f48 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/decorators.py
@@ -15,14 +15,12 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
-from itertools import imap
+from splunklib.six.moves import map as imap
from .internals import ConfigurationSettingsType, json_encode_string
from .validators import OptionName
@@ -35,7 +33,7 @@ class Configuration(object):
variable to search command classes that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive :code:`name` the word "Command" is removed
from the end of the class name and then converted to lower case for conformance with the `Search command style guide
- `_
+ `__
"""
def __init__(self, o=None, **kwargs):
@@ -69,15 +67,15 @@ def __call__(self, o):
# Set command name
name = o.__name__
- if name.endswith(b'Command'):
- name = name[:-len(b'Command')]
- o.name = unicode(name.lower())
+ if name.endswith('Command'):
+ name = name[:-len('Command')]
+ o.name = six.text_type(name.lower())
# Construct ConfigurationSettings instance for the command class
o.ConfigurationSettings = ConfigurationSettingsType(
- module=o.__module__ + b'.' + o.__name__,
- name=b'ConfigurationSettings',
+ module=o.__module__ + '.' + o.__name__,
+ name='ConfigurationSettings',
bases=(o.ConfigurationSettings,))
ConfigurationSetting.fix_up(o.ConfigurationSettings, self.settings)
@@ -138,7 +136,7 @@ def fix_up(cls, values):
for name, setting in definitions:
if setting._name is None:
- setting._name = name = unicode(name)
+ setting._name = name = six.text_type(name)
else:
name = setting._name
@@ -195,8 +193,8 @@ def is_supported_by_protocol(version):
del values[name]
if len(values) > 0:
- settings = sorted(list(values.iteritems()))
- settings = imap(lambda (n, v): '{}={}'.format(n, repr(v)), settings)
+ settings = sorted(list(six.iteritems(values)))
+ settings = imap(lambda n_v: '{}={}'.format(n_v[0], repr(n_v[1])), settings)
raise AttributeError('Inapplicable configuration settings: ' + ', '.join(settings))
cls.configuration_setting_definitions = definitions
@@ -228,8 +226,9 @@ class Option(property):
Short form (recommended). When you are satisfied with built-in or custom validation behaviors.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands.decorators import Option
from splunklib.searchcommands.validators import Fieldname
@@ -246,8 +245,9 @@ class Option(property):
also provide a deleter. You must be prepared to accept a value of :const:`None` which indicates that your
:code:`Option` is unset.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands import Option
@Option()
@@ -357,7 +357,7 @@ def __init__(self, command, option):
self._option = option
self._is_set = False
validator = self.validator
- self._format = unicode if validator is None else validator.format
+ self._format = six.text_type if validator is None else validator.format
def __repr__(self):
return '(' + repr(self.name) + ', ' + repr(self._format(self.value)) + ')'
@@ -417,24 +417,24 @@ class View(OrderedDict):
def __init__(self, command):
definitions = type(command).option_definitions
item_class = Option.Item
- OrderedDict.__init__(self, imap(lambda (name, option): (option.name, item_class(command, option)), definitions))
+ OrderedDict.__init__(self, ((option.name, item_class(command, option)) for (name, option) in definitions))
def __repr__(self):
- text = 'Option.View([' + ','.join(imap(lambda item: repr(item), self.itervalues())) + '])'
+ text = 'Option.View([' + ','.join(imap(lambda item: repr(item), six.itervalues(self))) + '])'
return text
def __str__(self):
- text = ' '.join([str(item) for item in self.itervalues() if item.is_set])
+ text = ' '.join([str(item) for item in six.itervalues(self) if item.is_set])
return text
# region Methods
def get_missing(self):
- missing = [item.name for item in self.itervalues() if item.is_required and not item.is_set]
+ missing = [item.name for item in six.itervalues(self) if item.is_required and not item.is_set]
return missing if len(missing) > 0 else None
def reset(self):
- for value in self.itervalues():
+ for value in six.itervalues(self):
value.reset()
pass
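The decorator changes above mostly swap Python 2 byte-string and iterator idioms for six equivalents; the Configuration and Option surface used by command authors stays the same. A minimal sketch of a streaming command built on that surface (the command itself is illustrative, not part of TA-dmarc):

from splunklib.searchcommands import \
    dispatch, StreamingCommand, Configuration, Option, validators

@Configuration()
class CountMatchesCommand(StreamingCommand):
    fieldname = Option(require=True, validate=validators.Fieldname())
    pattern = Option(require=True, validate=validators.RegularExpression())

    def stream(self, records):
        for record in records:
            # the RegularExpression validator stores a compiled pattern
            record['match_count'] = len(
                self.pattern.findall(str(record.get(self.fieldname, ''))))
            yield record

# dispatch(CountMatchesCommand, module_name=__name__)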
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/environment.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/environment.py
index 785042c..e92018f 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/environment.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/environment.py
@@ -18,7 +18,8 @@
from logging import getLogger, root, StreamHandler
from logging.config import fileConfig
-from os import chdir, environ, getcwdu, path
+from os import chdir, environ, path
+from splunklib.six.moves import getcwd
import sys
@@ -96,7 +97,7 @@ def configure_logging(logger_name, filename=None):
filename = path.realpath(filename)
if filename != _current_logging_configuration_file:
- working_directory = getcwdu()
+ working_directory = getcwd()
chdir(app_root)
try:
fileConfig(filename, {'SPLUNK_HOME': splunk_home})
@@ -112,7 +113,7 @@ def configure_logging(logger_name, filename=None):
_current_logging_configuration_file = None
-splunk_home = path.abspath(path.join(getcwdu(), environ.get('SPLUNK_HOME', '')))
+splunk_home = path.abspath(path.join(getcwd(), environ.get('SPLUNK_HOME', '')))
app_file = getattr(sys.modules['__main__'], '__file__', sys.executable)
app_root = path.dirname(path.abspath(path.dirname(app_file)))
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/eventing_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/eventing_command.py
index fde7aad..27dc13a 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/eventing_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/eventing_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from itertools import imap
+from splunklib import six
+from splunklib.six.moves import map as imap
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -113,10 +114,10 @@ class ConfigurationSettings(SearchCommand.ConfigurationSettings):
''')
- type = ConfigurationSetting(readonly=True, value='eventing', doc='''
+ type = ConfigurationSetting(readonly=True, value='events', doc='''
Command type
- Fixed: :const:`'eventing'`.
+ Fixed: :const:`'events'`.
Supported by: SCP 2
@@ -135,8 +136,14 @@ def fix_up(cls, command):
raise AttributeError('No EventingCommand.transform override')
SearchCommand.ConfigurationSettings.fix_up(command)
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
- return imap(lambda (name, value): (name, 'events' if name == 'type' else value), iteritems)
+ return imap(lambda name_value: (name_value[0], 'events' if name_value[0] == 'type' else name_value[1]), iteritems)
+
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
# endregion
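The eventing_command.py hunk renames the reported command type from 'eventing' to 'events' and adds a Python 3 items() alias; command classes themselves are written the same way. A sketch of an EventingCommand for reference (illustrative only):

from splunklib.searchcommands import dispatch, EventingCommand, Configuration

@Configuration()
class FilterErrorsCommand(EventingCommand):
    def transform(self, records):
        for record in records:
            # keep only events whose raw text mentions an error
            if 'error' in record.get('_raw', '').lower():
                yield record

# dispatch(FilterErrorsCommand, module_name=__name__)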
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/external_search_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/external_search_command.py
index c71d11b..c230624 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/external_search_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/external_search_command.py
@@ -20,6 +20,7 @@
import os
import sys
import traceback
+from splunklib import six
if sys.platform == 'win32':
from signal import signal, CTRL_BREAK_EVENT, SIGBREAK, SIGINT, SIGTERM
@@ -36,11 +37,11 @@ class ExternalSearchCommand(object):
"""
def __init__(self, path, argv=None, environ=None):
- if not isinstance(path, (bytes, unicode)):
+ if not isinstance(path, (bytes, six.text_type)):
raise ValueError('Expected a string value for path, not {}'.format(repr(path)))
self._logger = getLogger(self.__class__.__name__)
- self._path = unicode(path)
+ self._path = six.text_type(path)
self._argv = None
self._environ = None
@@ -89,7 +90,7 @@ def execute(self):
self._execute(self._path, self._argv, self._environ)
except:
error_type, error, tb = sys.exc_info()
- message = 'Command execution failed: ' + unicode(error)
+ message = 'Command execution failed: ' + six.text_type(error)
self._logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb)))
sys.exit(1)
@@ -104,13 +105,13 @@ def _execute(path, argv=None, environ=None):
:param argv: Argument list.
:type argv: list or tuple
- The arguments to the child process should start with the name of the command being run, but this is not
- enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
+ The arguments to the child process should start with the name of the command being run, but this is not
+ enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
:param environ: A mapping which is used to define the environment variables for the new process.
:type environ: dict or None.
- This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
- the :data:`os.environ` mapping should be used.
+ This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
+ the :data:`os.environ` mapping should be used.
:return: None
@@ -142,7 +143,9 @@ def terminate_child():
p.wait()
logger.debug('finished command="%s", arguments=%s, pid=%d, returncode=%d', path, argv, p.pid, p.returncode)
- sys.exit(p.returncode)
+
+ if p.returncode != 0:
+ sys.exit(p.returncode)
@staticmethod
def _search_path(executable, paths):
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/generating_command.py
index 3bd0192..6a75d2c 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/generating_command.py
@@ -15,11 +15,13 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
-from itertools import imap, ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
# P1 [O] TODO: Discuss generates_timeorder in the class-level documentation for GeneratingCommand
@@ -56,7 +58,7 @@ class GeneratingCommand(SearchCommand):
+==========+=====================================+============================================+
| streams | streaming=True[,local=[True|False]] | type='streaming'[,distributed=[true|false] |
+----------+-------------------------------------+--------------------------------------------+
- | events | retainsevents=True, streaming=False | type='eventing' |
+ | events | retainsevents=True, streaming=False | type='events' |
+----------+-------------------------------------+--------------------------------------------+
| reports | streaming=False | type='reporting' |
+----------+-------------------------------------+--------------------------------------------+
@@ -92,9 +94,10 @@ class StreamingGeneratingCommand(GeneratingCommand)
+==========+===================================================+===================================================+
| streams | 1. Add this line to your command's stanza in | 1. Add this configuration setting to your code: |
| | | |
- | | default/commands.conf. | .. code-block:: python |
- | | .. code-block:: python | @Configuration(distributed=True) |
- | | local = false | class SomeCommand(GeneratingCommand) |
+ | | default/commands.conf:: | .. code-block:: python |
+ | | | |
+ | | local = false | @Configuration(distributed=True) |
+ | | | class SomeCommand(GeneratingCommand) |
| | | ... |
| | 2. Restart splunk | |
| | | 2. You are good to go; no need to restart Splunk |
@@ -112,29 +115,33 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
- | | @Configuration( | @Configuration(type='eventing') |
+ | | | |
+ | | @Configuration( | @Configuration(type='events') |
| | retainsevents=True, streaming=False) | class SomeCommand(GeneratingCommand) |
| | class SomeCommand(GeneratingCommand) | ... |
| | ... | |
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = true | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = true | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
- @Configuration(type='eventing', retainsevents=True, streaming=False)
+ .. code-block:: python
+
+ @Configuration(type='events', retainsevents=True, streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: python
+
+ retainsevents = false
streaming = false
Reporting Generating command
@@ -149,28 +156,32 @@ class SomeCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration(retainsevents=False) | @Configuration(type='reporting') |
| | class SomeCommand(GeneratingCommand) | class SomeCommand(GeneratingCommand) |
| | ... | ... |
| | | |
| | Or add this lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = false | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = false | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='reporting', streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: text
+
+ retainsevents = false
streaming = false
"""
@@ -193,9 +204,59 @@ def _execute(self, ifile, process):
:return: `None`.
"""
- self._record_writer.write_records(self.generate())
+ if self._protocol_version == 2:
+ self._execute_v2(ifile, self.generate())
+ else:
+ assert self._protocol_version == 1
+ self._record_writer.write_records(self.generate())
self.finish()
+ def _execute_chunk_v2(self, process, chunk):
+ count = 0
+ records = []
+ for row in process:
+ records.append(row)
+ count += 1
+ if count == self._record_writer._maxresultrows:
+ break
+
+ for row in records:
+ self._record_writer.write_record(row)
+
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
+
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands, it must be true. Doing otherwise will cause an error.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command in a search;
+ # this class also implements its own _execute_chunk_v2 method, which does not respect allow_empty_input,
+ # so allow_empty_input must always be True.
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
+
# endregion
# region Types
@@ -280,7 +341,7 @@ class ConfigurationSettings(SearchCommand.ConfigurationSettings):
==================== ======================================================================================
Value Description
-------------------- --------------------------------------------------------------------------------------
- :const:`'eventing'` Runs as the first command in the Splunk events pipeline. Cannot be distributed.
+ :const:`'events'` Runs as the first command in the Splunk events pipeline. Cannot be distributed.
:const:`'reporting'` Runs as the first command in the Splunk reports pipeline. Cannot be distributed.
:const:`'streaming'` Runs as the first command in the Splunk streams pipeline. May be distributed.
==================== ======================================================================================
@@ -303,16 +364,22 @@ def fix_up(cls, command):
if command.generate == GeneratingCommand.generate:
raise AttributeError('No GeneratingCommand.generate override')
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
if version == 2:
- iteritems = ifilter(lambda (name, value): name != 'distributed', iteritems)
- if self.distributed and self.type == 'streaming':
+ iteritems = ifilter(lambda name_value1: name_value1[0] != 'distributed', iteritems)
+ if not self.distributed and self.type == 'streaming':
iteritems = imap(
- lambda (name, value): (name, 'stateful') if name == 'type' else (name, value), iteritems)
+ lambda name_value: (name_value[0], 'stateful') if name_value[0] == 'type' else (name_value[0], name_value[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass
# endregion
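generating_command.py gains a protocol-v2 execution path (_execute_v2 plus the chunked writer above) and a process() override that forces allow_empty_input=True. A minimal generating command exercising that path (illustrative only; not part of TA-dmarc):

import time
from splunklib.searchcommands import \
    dispatch, GeneratingCommand, Configuration, Option, validators

@Configuration(type='events', retainsevents=True, streaming=False)
class GenerateHelloCommand(GeneratingCommand):
    count = Option(require=True, validate=validators.Integer(minimum=1))

    def generate(self):
        # yield plain dicts; the record writer handles chunking per protocol
        for i in range(self.count):
            yield {'_time': time.time(), '_raw': 'hello %d' % i, 'event_no': i}

# dispatch(GenerateHelloCommand, module_name=__name__)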
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/internals.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/internals.py
index be57703..1ea2833 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/internals.py
@@ -14,45 +14,65 @@
# License for the specific language governing permissions and limitations
# under the License.
-from __future__ import absolute_import, division, print_function, unicode_literals
+from __future__ import absolute_import, division, print_function
+from io import TextIOWrapper
from collections import deque, namedtuple
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
-from cStringIO import StringIO
-from itertools import chain, imap
+from splunklib import six
+from collections import OrderedDict
+from splunklib.six.moves import StringIO
+from itertools import chain
+from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
-from urllib import unquote
+from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
+import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
-if sys.platform == 'win32':
- # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
- # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
- # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
- from platform import python_implementation
- implementation = python_implementation()
- fileno = sys.stdout.fileno()
- if implementation == 'PyPy':
- sys.stdout = os.fdopen(fileno, 'wb', 0)
- else:
- from msvcrt import setmode
- setmode(fileno, os.O_BINARY)
+
+def set_binary_mode(fh):
+ """ Helper method to set up binary mode for file handles.
+ Emphasis being sys.stdin, sys.stdout, sys.stderr.
+ For python3, we want to return .buffer
+ For python2+windows we want to set os.O_BINARY
+ """
+ typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
+ # check for file handle
+ if not isinstance(fh, typefile):
+ return fh
+
+ # check for python3 and buffer
+ if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
+ return fh.buffer
+ # check for python3
+ elif sys.version_info >= (3, 0):
+ pass
+ # check for windows python2. SPL-175233 -- python3 stdout is already binary
+ elif sys.platform == 'win32':
+ # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
+ # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
+ # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
+ from platform import python_implementation
+ implementation = python_implementation()
+ if implementation == 'PyPy':
+ return os.fdopen(fh.fileno(), 'wb', 0)
+ else:
+ import msvcrt
+ msvcrt.setmode(fh.fileno(), os.O_BINARY)
+ return fh
class CommandLineParser(object):
- """ Parses the arguments to a search command.
+ r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
@@ -210,7 +230,7 @@ def replace(match):
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
- _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
+ _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"\\])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
@@ -243,7 +263,7 @@ class ConfigurationSettingsType(type):
"""
def __new__(mcs, module, name, bases):
- mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, name, bases, {})
+ mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
return mcs
def __init__(cls, module, name, bases):
@@ -264,10 +284,10 @@ def validate_configuration_setting(specification, name, value):
return value
specification = namedtuple(
- b'ConfigurationSettingSpecification', (
- b'type',
- b'constraint',
- b'supporting_protocols'))
+ 'ConfigurationSettingSpecification', (
+ 'type',
+ 'constraint',
+ 'supporting_protocols'))
# P1 [ ] TODO: Review ConfigurationSettingsType.specification_matrix for completeness and correctness
@@ -294,7 +314,7 @@ def validate_configuration_setting(specification, name, value):
supporting_protocols=[1]),
'maxinputs': specification(
type=int,
- constraint=lambda value: 0 <= value <= sys.maxint,
+ constraint=lambda value: 0 <= value <= six.MAXSIZE,
supporting_protocols=[2]),
'overrides_timeorder': specification(
type=bool,
@@ -321,22 +341,24 @@ def validate_configuration_setting(specification, name, value):
constraint=None,
supporting_protocols=[1]),
'streaming_preop': specification(
- type=(bytes, unicode),
+ type=(bytes, six.text_type),
constraint=None,
supporting_protocols=[1, 2]),
'type': specification(
- type=(bytes, unicode),
- constraint=lambda value: value in ('eventing', 'reporting', 'streaming'),
+ type=(bytes, six.text_type),
+ constraint=lambda value: value in ('events', 'reporting', 'streaming'),
supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
""" Describes the properties of Splunk CSV streams """
- delimiter = b','
- quotechar = b'"'
+ delimiter = ','
+ quotechar = '"'
doublequote = True
skipinitialspace = False
- lineterminator = b'\r\n'
+ lineterminator = '\r\n'
+ if sys.version_info >= (3, 0) and sys.platform == 'win32':
+ lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
@@ -344,8 +366,9 @@ class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
+
def __str__(self):
- return '\n'.join([name + ':' + value for name, value in self.iteritems()])
+ return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
def read(self, ifile):
""" Reads an input header from an input file.
@@ -366,15 +389,16 @@ def read(self, ifile):
# start of a new item
if name is not None:
self[name] = value[:-1] # value sans trailing newline
- name, value = item[0], unquote(item[1])
+ name, value = item[0], urllib.parse.unquote(item[1])
elif name is not None:
# continuation of the current item
- value += unquote(line)
+ value += urllib.parse.unquote(line)
- if name is not None: self[name] = value[:-1] if value[-1] == '\n' else value
+ if name is not None:
+ self[name] = value[:-1] if value[-1] == '\n' else value
-Message = namedtuple(b'Message', (b'type', b'text'))
+Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
@@ -392,7 +416,7 @@ def _object_hook(dictionary):
while len(stack):
instance, member_name, dictionary = stack.popleft()
- for name, value in dictionary.iteritems():
+ for name, value in six.iteritems(dictionary):
if isinstance(value, dict):
stack.append((dictionary, name, value))
@@ -468,7 +492,7 @@ class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
- self._ofile = ofile
+ self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
@@ -479,8 +503,9 @@ def __init__(self, ofile, maxresultrows=None):
self._inspector = OrderedDict()
self._chunk_count = 0
- self._record_count = 0
- self._total_record_count = 0L
+ self._pending_record_count = 0
+ self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -496,7 +521,37 @@ def ofile(self):
@ofile.setter
def ofile(self, value):
- self._ofile = value
+ self._ofile = set_binary_mode(value)
+
+ @property
+ def pending_record_count(self):
+ return self._pending_record_count
+
+ @property
+ def _record_count(self):
+ warnings.warn(
+ "_record_count will be deprecated soon. Use pending_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.pending_record_count
+
+ @property
+ def committed_record_count(self):
+ return self._committed_record_count
+
+ @property
+ def _total_record_count(self):
+ warnings.warn(
+ "_total_record_count will be deprecated soon. Use committed_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.committed_record_count
+
+ def write(self, data):
+ bytes_type = bytes if sys.version_info >= (3, 0) else str
+ if not isinstance(data, bytes_type):
+ data = data.encode('utf-8')
+ self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
@@ -515,16 +570,16 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
- self._buffer.reset()
+ self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
- self._record_count = 0
- self._flushed = False
+ self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
@@ -536,9 +591,9 @@ def _write_record(self, record):
fieldnames = self._fieldnames
if fieldnames is None:
- self._fieldnames = fieldnames = record.keys()
- value_list = imap(lambda fn: unicode(fn).encode('utf-8'), fieldnames)
- value_list = imap(lambda fn: (fn, b'__mv_' + fn), value_list)
+ self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
+ value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
@@ -561,14 +616,14 @@ def _write_record(self, record):
if len(value) > 1:
value_list = value
- sv = b''
- mv = b'$'
+ sv = ''
+ mv = '$'
for value in value_list:
if value is None:
- sv += b'\n'
- mv += b'$;$'
+ sv += '\n'
+ mv += '$;$'
continue
value_t = type(value)
@@ -577,17 +632,17 @@ def _write_record(self, record):
if value_t is bool:
value = str(value.real)
- elif value_t is unicode:
- value = value.encode('utf-8', errors='backslashreplace')
- elif value_t is int or value_t is long or value_t is float or value_t is complex:
+ elif value_t is six.text_type:
+ value = value
+ elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
- sv += value + b'\n'
- mv += value.replace(b'$', b'$$') + b'$;$'
+ sv += value + '\n'
+ mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
@@ -603,11 +658,13 @@ def _write_record(self, record):
values += (value, None)
continue
- if value_t is unicode:
- values += (value.encode('utf-8', errors='backslashreplace'), None)
+ if value_t is six.text_type:
+ if six.PY2:
+ value = value.encode('utf-8')
+ values += (value, None)
continue
- if value_t is int or value_t is long or value_t is float or value_t is complex:
+ if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
@@ -615,12 +672,12 @@ def _write_record(self, record):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
- values += (repr(value).encode('utf-8', errors='backslashreplace'), None)
+ values += (repr(value), None)
self._writerow(values)
- self._record_count += 1
+ self._pending_record_count += 1
- if self._record_count >= self._maxresultrows:
+ if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
@@ -657,10 +714,9 @@ def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- if self._record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
+ if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
- write = self._ofile.write
if self._chunk_count == 0:
@@ -672,12 +728,12 @@ def flush(self, finished=None, partial=None):
message_level = RecordWriterV1._message_level.get
for level, text in messages:
- write(message_level(level, level))
- write('=')
- write(text)
- write('\r\n')
+ self.write(message_level(level, level))
+ self.write('=')
+ self.write(text)
+ self.write('\r\n')
- write('\r\n')
+ self.write('\r\n')
elif messages is not None:
@@ -695,10 +751,10 @@ def flush(self, finished=None, partial=None):
for level, text in messages:
print(level, text, file=stderr)
- write(self._buffer.getvalue())
- self._clear()
+ self.write(self._buffer.getvalue())
self._chunk_count += 1
- self._total_record_count += self._record_count
+ self._committed_record_count += self.pending_record_count
+ self._clear()
self._finished = finished is True
@@ -716,44 +772,43 @@ class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- inspector = self._inspector
-
- if self._flushed is False:
-
- self._total_record_count += self._record_count
- self._chunk_count += 1
-
- # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
- # ChunkedExternProcessor (See SPL-103525)
- #
- # We will need to replace the following block of code with this block:
- #
- # metadata = [
- # ('inspector', self._inspector if len(self._inspector) else None),
- # ('finished', finished),
- # ('partial', partial)]
- if len(inspector) == 0:
- inspector = None
-
- if partial is True:
- finished = False
-
- metadata = [item for item in ('inspector', inspector), ('finished', finished)]
- self._write_chunk(metadata, self._buffer.getvalue())
- self._clear()
+ if partial or not finished:
+ # Don't flush partial chunks, since the SCP v2 protocol does not
+ # provide a way to send partial chunks yet.
+ return
- elif finished is True:
- self._write_chunk((('finished', True),), '')
+ if not self.is_flushed:
+ self.write_chunk(finished=True)
- self._finished = finished is True
+ def write_chunk(self, finished=None):
+ inspector = self._inspector
+ self._committed_record_count += self.pending_record_count
+ self._chunk_count += 1
+
+ # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
+ # ChunkedExternProcessor (See SPL-103525)
+ #
+ # We will need to replace the following block of code with this block:
+ #
+ # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
+ #
+ # if partial is True:
+ # finished = False
+
+ if len(inspector) == 0:
+ inspector = None
+
+ metadata = [item for item in (('inspector', inspector), ('finished', finished))]
+ self._write_chunk(metadata, self._buffer.getvalue())
+ self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
- metadata = chain(configuration.iteritems(), (('inspector', self._inspector if self._inspector else None),))
+ metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
- self._ofile.write('\n')
+ self.write('\n')
self._clear()
def write_metric(self, name, value):
@@ -761,26 +816,29 @@ def write_metric(self, name, value):
self._inspector['metric.' + name] = value
def _clear(self):
- RecordWriter._clear(self)
+ super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
+ if sys.version_info >= (3, 0):
+ metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
+ if sys.version_info >= (3, 0):
+ body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
- start_line = b'chunked 1.0,' + bytes(metadata_length) + b',' + bytes(body_length) + b'\n'
- write = self._ofile.write
- write(start_line)
- write(metadata)
- write(body)
+ start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
+ self.write(start_line)
+ self.write(metadata)
+ self.write(body)
self._ofile.flush()
- self._flushed = False
+ self._flushed = True
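For reference, the rewritten RecordWriterV2._write_chunk above frames every SCP v2 chunk as a 'chunked 1.0,<metadata_length>,<body_length>\n' header followed by the JSON metadata and CSV body, with both payloads UTF-8 encoded before their lengths are measured. The snippet below is an illustrative sketch only (it is not part of the patch, and build_chunk is a hypothetical helper) showing the same framing:

# Sketch of the frame produced by RecordWriterV2._write_chunk after this change.
# build_chunk is hypothetical; lengths are taken from the UTF-8 encoded payloads,
# matching the Python 3 branch added above.
def build_chunk(metadata_json, body_csv):
    metadata = metadata_json.encode('utf-8')
    body = body_csv.encode('utf-8')
    header = 'chunked 1.0,%s,%s\n' % (len(metadata), len(body))
    return header.encode('utf-8') + metadata + body

frame = build_chunk('{"finished": true}', '')
assert frame.startswith(b'chunked 1.0,18,0\n')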
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/reporting_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/reporting_command.py
index c856ee1..9470861 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/reporting_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/reporting_command.py
@@ -23,6 +23,7 @@
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
+from splunklib import six
class ReportingCommand(SearchCommand):
@@ -93,7 +94,7 @@ def prepare(self):
self._configuration.streaming_preop = ' '.join(streaming_preop)
return
- raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(unicode(phase))))
+ raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(six.text_type(phase))))
def reduce(self, records):
""" Override this method to produce a reporting data structure.
@@ -252,7 +253,7 @@ def fix_up(cls, command):
cls._requires_preop = False
return
- f = vars(command)[b'map'] # Function backing the map method
+ f = vars(command)['map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
@@ -265,7 +266,7 @@ def fix_up(cls, command):
# Create new StreamingCommand.ConfigurationSettings class
- module = command.__module__ + b'.' + command.__name__ + b'.map'
+ module = command.__module__ + '.' + command.__name__ + '.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
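The changes above keep ReportingCommand's map/reduce machinery importable under both Python 2 and 3. As a reminder of what that machinery drives, here is a minimal reporting command in the style the SDK documents; it is an illustrative sketch only, and CountCommand is a made-up name:

from splunklib.searchcommands import dispatch, ReportingCommand, Configuration

@Configuration()
class CountCommand(ReportingCommand):
    # The map phase streams records through unchanged; fix_up wires it to a
    # generated StreamingCommand.ConfigurationSettings class as shown above.
    @Configuration()
    def map(self, records):
        return records

    # The reduce phase consumes the mapped records and emits a single result.
    def reduce(self, records):
        yield {'count': sum(1 for _ in records)}

if __name__ == '__main__':
    dispatch(CountCommand, module_name=__name__)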
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/search_command.py
index 380d760..dd11391 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/search_command.py
@@ -18,26 +18,28 @@
# Absolute imports
-from ..client import Service
+from collections import namedtuple
+import io
-from collections import namedtuple
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict
from copy import deepcopy
-from cStringIO import StringIO
-from itertools import chain, ifilter, imap, islice, izip
-from logging import _levelNames, getLevelName, getLogger
+from splunklib.six.moves import StringIO
+from itertools import chain, islice
+from splunklib.six.moves import filter as ifilter, map as imap, zip as izip
+from splunklib import six
+if six.PY2:
+ from logging import _levelNames, getLevelName, getLogger
+else:
+ from logging import _nameToLevel as _levelNames, getLevelName, getLogger
try:
from shutil import make_archive
except ImportError:
# Used for recording, skip on python 2.6
pass
from time import time
-from urllib import unquote
-from urlparse import urlsplit
+from splunklib.six.moves.urllib.parse import unquote
+from splunklib.six.moves.urllib.parse import urlsplit
from warnings import warn
from xml.etree import ElementTree
@@ -50,7 +52,7 @@
# Relative imports
-from . internals import (
+from .internals import (
CommandLineParser,
CsvDialect,
InputHeader,
@@ -64,6 +66,8 @@
json_encode_string)
from . import Boolean, Option, environment
+from ..client import Service
+
# ----------------------------------------------------------------------------------------------------------------------
@@ -91,6 +95,7 @@ class SearchCommand(object):
""" Represents a custom search command.
"""
+
def __init__(self):
# Variables that may be used, but not altered by derived classes
@@ -116,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -152,7 +158,7 @@ def logging_level(self):
def logging_level(self, value):
if value is None:
value = self._default_logging_level
- if isinstance(value, (bytes, unicode)):
+ if isinstance(value, (bytes, six.text_type)):
try:
level = _levelNames[value.upper()]
except KeyError:
@@ -164,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
record = Option(doc='''
**Syntax: record=<bool>
@@ -248,7 +262,7 @@ def search_results_info(self):
invocation.
:return: Search results info:const:`None`, if the search results info file associated with the command
- invocation is inaccessible.
+ invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
@@ -271,10 +285,10 @@ def search_results_info(self):
path = os.path.join(dispatch_dir, 'info.csv')
try:
- with open(path, 'rb') as f:
+ with io.open(path, 'r') as f:
reader = csv.reader(f, dialect=CsvDialect)
- fields = reader.next()
- values = reader.next()
+ fields = next(reader)
+ values = next(reader)
except IOError as error:
if error.errno == 2:
self.logger.error('Search results info file {} does not exist.'.format(json_encode_string(path)))
@@ -292,7 +306,7 @@ def convert_value(value):
except ValueError:
return value
- info = ObjectView(dict(imap(lambda (f, v): (convert_field(f), convert_value(v)), izip(fields, values))))
+ info = ObjectView(dict(imap(lambda f_v: (convert_field(f_v[0]), convert_value(f_v[1])), izip(fields, values))))
try:
count_map = info.countMap
@@ -309,7 +323,7 @@ def convert_value(value):
except AttributeError:
pass
else:
- messages = ifilter(lambda (t, m): t or m, izip(msg_type.split('\n'), msg_text.split('\n')))
+ messages = ifilter(lambda t_m: t_m[0] or t_m[1], izip(msg_type.split('\n'), msg_text.split('\n')))
info.msg = [Message(message) for message in messages]
del info.msgType
@@ -330,6 +344,7 @@ def service(self):
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
+
enableheader = true
requires_srinfo = true
@@ -337,8 +352,8 @@ def service(self):
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
- :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
- of :code:`None` is returned.
+ :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
+ of :code:`None` is returned.
"""
if self._service is not None:
@@ -389,7 +404,7 @@ def flush(self):
:return: :const:`None`
"""
- self._record_writer.flush(partial=True)
+ self._record_writer.flush(finished=False)
def prepare(self):
""" Prepare for execution.
@@ -404,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -416,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -445,7 +466,7 @@ def _map_metadata(self, argv):
def _map(metadata_map):
metadata = {}
- for name, value in metadata_map.iteritems():
+ for name, value in six.iteritems(metadata_map):
if isinstance(value, dict):
value = _map(value)
else:
@@ -495,7 +516,7 @@ def _map(metadata_map):
'username':
(lambda v: v.ppc_user, lambda s: s.search_results_info)}}
- _MetadataSource = namedtuple(b'Source', (b'argv', b'input_header', b'search_results_info'))
+ _MetadataSource = namedtuple('Source', ('argv', 'input_header', 'search_results_info'))
def _prepare_protocol_v1(self, argv, ifile, ofile):
@@ -582,7 +603,7 @@ def _process_protocol_v1(self, argv, ifile, ofile):
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._record_writer.write_record(dict(
- (n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in self._configuration.iteritems()))
+ (n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in six.iteritems(self._configuration)))
self.finish()
elif argv[1] == '__EXECUTE__':
@@ -610,7 +631,7 @@ def _process_protocol_v1(self, argv, ifile, ofile):
raise RuntimeError(message)
except (SyntaxError, ValueError) as error:
- self.write_error(unicode(error))
+ self.write_error(six.text_type(error))
self.flush()
exit(0)
@@ -625,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -647,7 +681,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Reading metadata')
- metadata, body = self._read_chunk(ifile)
+ metadata, body = self._read_chunk(self._as_binary_stream(ifile))
action = getattr(metadata, 'action', None)
@@ -684,7 +718,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# Write search command configuration for consumption by splunkd
# noinspection PyBroadException
try:
- self._record_writer = RecordWriterV2(ofile, getattr(self._metadata, 'maxresultrows', None))
+ self._record_writer = RecordWriterV2(ofile, getattr(self._metadata.searchinfo, 'maxresultrows', None))
self.fieldnames = []
self.options.reset()
@@ -695,11 +729,12 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
- self.fieldnames.append(result[0])
+ self.fieldnames.append(str(result[0]))
else:
name, value = result
+ name = str(name)
try:
option = self.options[name]
except KeyError:
@@ -725,7 +760,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if error_count > 0:
exit(1)
- debug(' command: %s', unicode(self))
+ debug(' command: %s', six.text_type(self))
debug('Preparing for execution')
self.prepare()
@@ -743,7 +778,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])
metadata = MetadataEncoder().encode(self._metadata)
- ifile.record('chunked 1.0,', unicode(len(metadata)), ',0\n', metadata)
+ ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata)
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
@@ -766,7 +801,6 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
- self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
@@ -800,15 +834,15 @@ def write_metric(self, name, value):
:param name: Name of the metric.
:type name: basestring
- :param value: A 4-tuple containing the value of metric :param:`name` where
+ :param value: A 4-tuple containing the value of metric ``name`` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
:return: :const:`None`.
@@ -823,6 +857,8 @@ def _decode_list(mv):
_encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
+ # Note: Subclasses must override this method so that it can be
+ # called as self._execute(ifile, None)
def _execute(self, ifile, process):
""" Default processing loop
@@ -836,22 +872,38 @@ def _execute(self, ifile, process):
:rtype: NoneType
"""
- self._record_writer.write_records(process(self._records(ifile)))
- self.finish()
+ if self.protocol_version == 1:
+ self._record_writer.write_records(process(self._records(ifile)))
+ self.finish()
+ else:
+ assert self._protocol_version == 2
+ self._execute_v2(ifile, process)
@staticmethod
- def _read_chunk(ifile):
+ def _as_binary_stream(ifile):
+ naught = ifile.read(0)
+ if isinstance(naught, bytes):
+ return ifile
+
+ try:
+ return ifile.buffer
+ except AttributeError as error:
+ raise RuntimeError('Failed to get underlying buffer: {}'.format(error))
+ @staticmethod
+ def _read_chunk(istream):
# noinspection PyBroadException
+ assert isinstance(istream.read(0), six.binary_type), 'Stream must be binary'
+
try:
- header = ifile.readline()
+ header = istream.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
- match = SearchCommand._header.match(header)
+ match = SearchCommand._header.match(six.ensure_str(header))
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
@@ -861,35 +913,39 @@ def _read_chunk(ifile):
body_length = int(body_length)
try:
- metadata = ifile.read(metadata_length)
+ metadata = istream.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
- metadata = decoder.decode(metadata)
+ metadata = decoder.decode(six.ensure_str(metadata))
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
# if body_length <= 0:
# return metadata, ''
+ body = ""
try:
- body = ifile.read(body_length)
+ if body_length > 0:
+ body = istream.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
- return metadata, body
+ return metadata, six.ensure_str(body)
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
def _records_protocol_v1(self, ifile):
+ return self._read_csv_records(ifile)
+ def _read_csv_records(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
- fieldnames = reader.next()
+ fieldnames = next(reader)
except StopIteration:
return
@@ -910,51 +966,37 @@ def _records_protocol_v1(self, ifile):
record[fieldname] = value
yield record
- def _records_protocol_v2(self, ifile):
+ def _execute_v2(self, ifile, process):
+ istream = self._as_binary_stream(ifile)
while True:
- result = self._read_chunk(ifile)
+ result = self._read_chunk(istream)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
-
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
- finished = getattr(metadata, 'finished', False)
+ self._finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
- if len(body) > 0:
- reader = csv.reader(StringIO(body), dialect=CsvDialect)
+ self._execute_chunk_v2(process, result)
- try:
- fieldnames = reader.next()
- except StopIteration:
- return
+ self._record_writer.write_chunk(finished=self._finished)
- mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
+ def _execute_chunk_v2(self, process, chunk):
+ metadata, body = chunk
- if len(mv_fieldnames) == 0:
- for values in reader:
- yield OrderedDict(izip(fieldnames, values))
- else:
- for values in reader:
- record = OrderedDict()
- for fieldname, value in izip(fieldnames, values):
- if fieldname.startswith('__mv_'):
- if len(value) > 0:
- record[mv_fieldnames[fieldname]] = self._decode_list(value)
- elif fieldname not in record:
- record[fieldname] = value
- yield record
-
- if finished:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
- self.flush()
+ records = self._read_csv_records(StringIO(body))
+ self._record_writer.write_records(process(records))
def _report_unexpected_error(self):
@@ -1005,7 +1047,8 @@ def __str__(self):
:return: String representation of this instance
"""
- text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
+ #text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
+ text = ', '.join(['{}={}'.format(name, json_encode_string(six.text_type(value))) for (name, value) in six.iteritems(self)])
return text
# region Methods
@@ -1024,23 +1067,29 @@ def fix_up(cls, command_class):
"""
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
return ifilter(
- lambda (name, value): value is not None, imap(
+ lambda name_value1: name_value1[1] is not None, imap(
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass # endregion
pass # endregion
-SearchMetric = namedtuple(b'SearchMetric', (b'elapsed_seconds', b'invocation_count', b'input_count', b'output_count'))
+SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza `_ based on the value of
@@ -1063,11 +1112,13 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
#!/usr/bin/env python
@@ -1083,7 +1134,7 @@ def stream(records):
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@@ -1100,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
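To illustrate the binary-stream handling introduced by _as_binary_stream and _read_chunk, the following standalone sketch (not part of the patch; the payload values are made up) parses one v2 transport chunk from an in-memory stream using the same header pattern as SearchCommand._header:

import io
import json
import re

# Same pattern as SearchCommand._header above.
header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')

metadata = json.dumps({'action': 'execute', 'finished': True}).encode('utf-8')
body = b'_time,_raw\r\n'
stream = io.BytesIO(b'chunked 1.0,%d,%d\n' % (len(metadata), len(body)) + metadata + body)

# Mirror _read_chunk: read the header line, then metadata and body by length.
match = header.match(stream.readline().decode('utf-8'))
metadata_length, body_length = int(match.group(1)), int(match.group(2))
print(json.loads(stream.read(metadata_length)), stream.read(body_length))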
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/streaming_command.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/streaming_command.py
index 12e9f03..fa075ed 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/streaming_command.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/streaming_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from itertools import ifilter, imap
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -172,17 +173,23 @@ def fix_up(cls, command):
raise AttributeError('No StreamingCommand.stream override')
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
if version == 1:
if self.required_fields is None:
- iteritems = ifilter(lambda (name, value): name != 'clear_required_fields', iteritems)
+ iteritems = ifilter(lambda name_value: name_value[0] != 'clear_required_fields', iteritems)
else:
- iteritems = ifilter(lambda (name, value): name != 'distributed', iteritems)
- if self.distributed:
+ iteritems = ifilter(lambda name_value2: name_value2[0] != 'distributed', iteritems)
+ if not self.distributed:
iteritems = imap(
- lambda (name, value): (name, 'stateful') if name == 'type' else (name, value), iteritems)
+ lambda name_value1: (name_value1[0], 'stateful') if name_value1[0] == 'type' else (name_value1[0], name_value1[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
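For completeness, the add_field helper added to SearchCommand earlier in this patch is what lets a streaming command emit fields that were absent from the incoming records. The sketch below is illustrative only (TagCommand and the 'tagged' field are made up):

from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

@Configuration()
class TagCommand(StreamingCommand):
    def stream(self, records):
        for record in records:
            # add_field registers 'tagged' with the record writer's
            # custom_fields set, so it is written even if the first
            # record did not contain it.
            self.add_field(record, 'tagged', '1')
            yield record

if __name__ == '__main__':
    dispatch(TagCommand, module_name=__name__)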
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/validators.py b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/validators.py
index 9b9fee3..22f0e16 100644
--- a/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/searchcommands/validators.py
@@ -18,11 +18,13 @@
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
-from cStringIO import StringIO
+from splunklib.six.moves import StringIO
from io import open
import csv
import os
import re
+from splunklib import six
+from splunklib.six.moves import getcwd
class Validator(object):
@@ -58,7 +60,7 @@ class Boolean(Validator):
def __call__(self, value):
if not (value is None or isinstance(value, bool)):
- value = unicode(value).lower()
+ value = six.text_type(value).lower()
if value not in Boolean.truth_values:
raise ValueError('Unrecognized truth value: {0}'.format(value))
value = Boolean.truth_values[value]
@@ -79,9 +81,9 @@ class Code(Validator):
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
- sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
- consists of a single interactive statement. In the latter case, expression statements that evaluate to
- something other than :const:`None` will be printed.
+ sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
+ consists of a single interactive statement. In the latter case, expression statements that evaluate to
+ something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
@@ -91,14 +93,16 @@ def __call__(self, value):
if value is None:
return None
try:
- return Code.object(compile(value, 'string', self._mode), unicode(value))
+ return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- raise ValueError(error.message)
+ message = str(error)
+
+ six.raise_from(ValueError(message), error)
def format(self, value):
return None if value is None else value.source
- object = namedtuple(b'Code', (b'object', 'source'))
+ object = namedtuple('Code', ('object', 'source'))
class Fieldname(Validator):
@@ -109,7 +113,7 @@ class Fieldname(Validator):
def __call__(self, value):
if value is not None:
- value = unicode(value)
+ value = six.text_type(value)
if Fieldname.pattern.match(value) is None:
raise ValueError('Illegal characters in fieldname: {}'.format(value))
return value
@@ -132,7 +136,7 @@ def __call__(self, value):
if value is None:
return value
- path = unicode(value)
+ path = six.text_type(value)
if not os.path.isabs(path):
path = os.path.join(self.directory, path)
@@ -149,7 +153,7 @@ def format(self, value):
return None if value is None else value.name
_var_run_splunk = os.path.join(
- os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else os.getcwdu(), 'var', 'run', 'splunk')
+ os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else getcwd(), 'var', 'run', 'splunk')
class Integer(Validator):
@@ -183,7 +187,10 @@ def __call__(self, value):
if value is None:
return None
try:
- value = long(value)
+ if six.PY2:
+ value = long(value)
+ else:
+ value = int(value)
except ValueError:
raise ValueError('Expected integer value, not {}'.format(json_encode_string(value)))
@@ -191,7 +198,49 @@ def __call__(self, value):
return value
def format(self, value):
- return None if value is None else unicode(long(value))
+ return None if value is None else six.text_type(int(value))
+
+
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
class Duration(Validator):
@@ -244,10 +293,10 @@ class List(Validator):
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
- delimiter = b','
- quotechar = b'"'
+ delimiter = str(',')
+ quotechar = str('"')
doublequote = True
- lineterminator = b'\n'
+ lineterminator = str('\n')
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
@@ -262,7 +311,7 @@ def __call__(self, value):
return value
try:
- value = csv.reader([value], self.Dialect).next()
+ value = next(csv.reader([value], self.Dialect))
except csv.Error as error:
raise ValueError(error)
@@ -297,7 +346,7 @@ def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {0}'.format(value))
@@ -305,7 +354,7 @@ def __call__(self, value):
return self.membership[value]
def format(self, value):
- return None if value is None else self.membership.keys()[self.membership.values().index(value)]
+ return None if value is None else list(self.membership.keys())[list(self.membership.values()).index(value)]
class Match(Validator):
@@ -313,19 +362,19 @@ class Match(Validator):
"""
def __init__(self, name, pattern, flags=0):
- self.name = unicode(name)
+ self.name = six.text_type(name)
self.pattern = re.compile(pattern, flags)
def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if self.pattern.match(value) is None:
raise ValueError('Expected {}, not {}'.format(self.name, json_encode_string(value)))
return value
def format(self, value):
- return None if value is None else unicode(value)
+ return None if value is None else six.text_type(value)
class OptionName(Validator):
@@ -336,13 +385,13 @@ class OptionName(Validator):
def __call__(self, value):
if value is not None:
- value = unicode(value)
+ value = six.text_type(value)
if OptionName.pattern.match(value) is None:
raise ValueError('Illegal characters in option name: {}'.format(value))
return value
def format(self, value):
- return None if value is None else unicode(value)
+ return None if value is None else six.text_type(value)
class RegularExpression(Validator):
@@ -353,9 +402,9 @@ def __call__(self, value):
if value is None:
return None
try:
- value = re.compile(unicode(value))
+ value = re.compile(six.text_type(value))
except re.error as error:
- raise ValueError('{}: {}'.format(unicode(error).capitalize(), value))
+ raise ValueError('{}: {}'.format(six.text_type(error).capitalize(), value))
return value
def format(self, value):
@@ -372,7 +421,7 @@ def __init__(self, *args):
def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {}'.format(value))
return value
@@ -381,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
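The new Float validator slots in alongside Integer for option validation. A hedged usage sketch follows (ThresholdCommand, the threshold option, and the score field are hypothetical):

from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators

@Configuration()
class ThresholdCommand(StreamingCommand):
    # Float(minimum, maximum) rejects values outside the given range,
    # as implemented in the hunk above.
    threshold = Option(require=True, validate=validators.Float(minimum=0.0, maximum=1.0))

    def stream(self, records):
        for record in records:
            if float(record.get('score', 0)) >= self.threshold:
                yield record

if __name__ == '__main__':
    dispatch(ThresholdCommand, module_name=__name__)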
diff --git a/bin/ta_dmarc/solnlib/packages/splunklib/six.py b/bin/ta_dmarc/solnlib/packages/splunklib/six.py
new file mode 100644
index 0000000..d13e50c
--- /dev/null
+++ b/bin/ta_dmarc/solnlib/packages/splunklib/six.py
@@ -0,0 +1,993 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson "
+__version__ = "1.14.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ # This does exactly the same what the :func:`py3:functools.update_wrapper`
+ # function does on Python versions after 3.2. It sets the ``__wrapped__``
+ # attribute on ``wrapper`` object and it doesn't raise an error if any of
+ # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+ # ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
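+# A minimal sketch of how this decorator is typically used (assumes "import abc"):
+#   @add_metaclass(abc.ABCMeta)
+#   class Base(object):
+#       pass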
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
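+# Illustrative results (assuming Python 3 and the default utf-8 encoding):
+#   ensure_binary("abc")  # -> b'abc'
+#   ensure_str(b"abc")    # -> 'abc'
+#   ensure_text(b"abc")   # -> 'abc'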
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
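+# A minimal sketch of the intended usage:
+#   @python_2_unicode_compatible
+#   class MyText(object):
+#       def __str__(self):
+#           return u'some text'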
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
+
+import warnings
+
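+# The decorator below emits a DeprecationWarning naming the wrapped function.
+# A minimal sketch of its usage (hypothetical function names):
+#   @deprecated("Use new_func() instead.")
+#   def old_func():
+#       pass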
+def deprecated(message):
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
diff --git a/bin/ta_dmarc/splunklib/__init__.py b/bin/ta_dmarc/splunklib/__init__.py
index 92a9d0c..1f9fc68 100644
--- a/bin/ta_dmarc/splunklib/__init__.py
+++ b/bin/ta_dmarc/splunklib/__init__.py
@@ -14,6 +14,22 @@
"""Python library for Splunk."""
-__version_info__ = (1, 6, 0)
-__version__ = ".".join(map(str, __version_info__))
+from __future__ import absolute_import
+from splunklib.six.moves import map
+import logging
+
+DEFAULT_LOG_FORMAT = '%(asctime)s, Level=%(levelname)s, Pid=%(process)s, Logger=%(name)s, File=%(filename)s, ' \
+ 'Line=%(lineno)s, %(message)s'
+DEFAULT_DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
+
+# To set the logging level of splunklib
+# ex. To enable debug logs, call this method with parameter 'logging.DEBUG'
+# default logging level is set to 'WARNING'
+def setup_logging(level, log_format=DEFAULT_LOG_FORMAT, date_format=DEFAULT_DATE_FORMAT):
+ logging.basicConfig(level=level,
+ format=log_format,
+ datefmt=date_format)
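+# A minimal sketch of enabling debug logging for splunklib (hypothetical caller):
+#   import logging
+#   import splunklib
+#   splunklib.setup_logging(logging.DEBUG)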
+
+__version_info__ = (1, 6, 20)
+__version__ = ".".join(map(str, __version_info__))
diff --git a/bin/ta_dmarc/splunklib/binding.py b/bin/ta_dmarc/splunklib/binding.py
index 073a78d..bb2771d 100644
--- a/bin/ta_dmarc/splunklib/binding.py
+++ b/bin/ta_dmarc/splunklib/binding.py
@@ -24,29 +24,32 @@
:mod:`splunklib.client` module.
"""
-import httplib
+from __future__ import absolute_import
+
+import io
import logging
import socket
import ssl
-import urllib
-import io
import sys
-import Cookie
-
+import time
from base64 import b64encode
+from contextlib import contextmanager
from datetime import datetime
from functools import wraps
-from StringIO import StringIO
+from io import BytesIO
+from xml.etree.ElementTree import XML
-from contextlib import contextmanager
+from splunklib import six
+from splunklib.six.moves import urllib
+
+from .data import record
-from xml.etree.ElementTree import XML
try:
from xml.etree.ElementTree import ParseError
-except ImportError, e:
+except ImportError as e:
from xml.parsers.expat import ExpatError as ParseError
-from .data import record
+logger = logging.getLogger(__name__)
__all__ = [
"AuthenticationError",
@@ -68,7 +71,7 @@ def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
- logging.debug("Operation took %s", end_time-start_time)
+ logger.debug("Operation took %s", end_time-start_time)
return val
return new_f
@@ -78,6 +81,7 @@ def _parse_cookies(cookie_str, dictionary):
then updates the the dictionary with any key-value pairs found.
**Example**::
+
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
@@ -88,7 +92,7 @@ def _parse_cookies(cookie_str, dictionary):
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
- parsed_cookie = Cookie.SimpleCookie(cookie_str)
+ parsed_cookie = six.moves.http_cookies.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value
@@ -168,12 +172,12 @@ def __new__(self, val='', skip_encode=False, encode_slash=False):
elif skip_encode:
return str.__new__(self, val)
elif encode_slash:
- return str.__new__(self, urllib.quote_plus(val))
+ return str.__new__(self, urllib.parse.quote_plus(val))
else:
# When subclassing str, just call str's __new__ method
# with your class and the value you want to have in the
# new string.
- return str.__new__(self, urllib.quote(val))
+ return str.__new__(self, urllib.parse.quote(val))
def __add__(self, other):
"""self + other
@@ -184,7 +188,7 @@ def __add__(self, other):
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__add__(self, other), skip_encode=True)
else:
- return UrlEncoded(str.__add__(self, urllib.quote(other)), skip_encode=True)
+ return UrlEncoded(str.__add__(self, urllib.parse.quote(other)), skip_encode=True)
def __radd__(self, other):
"""other + self
@@ -195,7 +199,7 @@ def __radd__(self, other):
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__radd__(self, other), skip_encode=True)
else:
- return UrlEncoded(str.__add__(urllib.quote(other), self), skip_encode=True)
+ return UrlEncoded(str.__add__(urllib.parse.quote(other), self), skip_encode=True)
def __mod__(self, fields):
"""Interpolation into ``UrlEncoded``s is disabled.
@@ -205,7 +209,7 @@ def __mod__(self, fields):
"""
raise TypeError("Cannot interpolate into a UrlEncoded object.")
def __repr__(self):
- return "UrlEncoded(%s)" % repr(urllib.unquote(str(self)))
+ return "UrlEncoded(%s)" % repr(urllib.parse.unquote(str(self)))
@contextmanager
def _handle_auth_error(msg):
@@ -293,8 +297,7 @@ def wrapper(self, *args, **kwargs):
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
- "Autologin succeeded, but there was an auth error on "
- "next request. Something is very wrong."):
+ "Authentication Failed! If session token is used, it seems to have been expired."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
@@ -429,6 +432,8 @@ class Context(object):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+ :param verify: Enable (True) or disable (False) SSL verification for https connections.
+ :type verify: ``Boolean``
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param owner: The owner context of the namespace (optional, the default is "None").
@@ -445,6 +450,16 @@ class Context(object):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param splunkToken: Splunk authentication token
+ :type splunkToken: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER AND BLOCK THE
+ CURRENT THREAD WHILE RETRYING.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
@@ -461,7 +476,9 @@ class Context(object):
c = binding.Context(cookie="splunkd_8089=...")
"""
def __init__(self, handler=None, **kwargs):
- self.http = HttpLib(handler)
+ self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"),
+ cert_file=kwargs.get("cert_file"), context=kwargs.get("context"), # Default to False for backward compat
+ retries=kwargs.get("retries", 0), retryDelay=kwargs.get("retryDelay", 10))
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
@@ -473,10 +490,12 @@ def __init__(self, handler=None, **kwargs):
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.basic = kwargs.get("basic", False)
+ self.bearerToken = kwargs.get("splunkToken", "")
self.autologin = kwargs.get("autologin", False)
+ self.additional_headers = kwargs.get("headers", [])
# Store any cookies in the self.http._cookies dict
- if kwargs.has_key("cookie") and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
+ if "cookie" in kwargs and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
_parse_cookies(kwargs["cookie"], self.http._cookies)
def get_cookies(self):
@@ -488,13 +507,13 @@ def get_cookies(self):
return self.http._cookies
def has_cookies(self):
- """Returns true if the ``HttpLib`` member of this instance has at least
- one cookie stored.
+ """Returns true if the ``HttpLib`` member of this instance has auth token stored.
- :return: ``True`` if there is at least one cookie, else ``False``
+ :return: ``True`` if there is an auth token present, else ``False``
:rtype: ``bool``
"""
- return len(self.get_cookies()) > 0
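+ # Splunk session cookies are named like "splunkd_<port>", so looking for that
+ # prefix detects a stored auth token rather than counting arbitrary cookies.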
+ auth_token_key = "splunkd_"
+ return any(auth_token_key in key for key in self.get_cookies().keys())
# Shared per-context request headers
@property
@@ -508,9 +527,12 @@ def _auth_headers(self):
:returns: A list of 2-tuples containing key and value
"""
if self.has_cookies():
- return [("Cookie", _make_cookie_header(self.get_cookies().items()))]
+ return [("Cookie", _make_cookie_header(list(self.get_cookies().items())))]
elif self.basic and (self.username and self.password):
- token = 'Basic %s' % b64encode("%s:%s" % (self.username, self.password))
+ token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii')
+ return [("Authorization", token)]
+ elif self.bearerToken:
+ token = 'Bearer %s' % self.bearerToken
return [("Authorization", token)]
elif self.token is _NoAuthenticationToken:
return []
@@ -603,13 +625,13 @@ def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("DELETE request to %s (body: %s)", path, repr(query))
+ logger.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
- def get(self, path_segment, owner=None, app=None, sharing=None, **query):
+ def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
@@ -632,6 +654,8 @@ def get(self, path_segment, owner=None, app=None, sharing=None, **query):
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
@@ -659,10 +683,14 @@ def get(self, path_segment, owner=None, app=None, sharing=None, **query):
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
+ if headers is None:
+ headers = []
+
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- logging.debug("GET request to %s (body: %s)", path, repr(query))
- response = self.http.get(path, self._auth_headers, **query)
+ logger.debug("GET request to %s (body: %s)", path, repr(query))
+ all_headers = headers + self.additional_headers + self._auth_headers
+ response = self.http.get(path, all_headers, **query)
return response
@_authentication
@@ -703,7 +731,12 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
- :type query: ``string``
+ :param body: Parameters to be used in the post body. If specified,
+ any parameters in the query will be applied to the URL instead of
+ the body. If a dict is supplied, the key-value pairs will be form
+ encoded. If a string is supplied, the body will be passed through
+ in the request unchanged.
+ :type body: ``dict`` or ``str``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -733,14 +766,20 @@ def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, *
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
- logging.debug("POST request to %s (body: %s)", path, repr(query))
- all_headers = headers + self._auth_headers
+
+ # To avoid writing sensitive data in debug logs
+ endpoint_having_sensitive_data = ["/storage/passwords"]
+ if any(endpoint in path for endpoint in endpoint_having_sensitive_data):
+ logger.debug("POST request to %s ", path)
+ else:
+ logger.debug("POST request to %s (body: %s)", path, repr(query))
+ all_headers = headers + self.additional_headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
- def request(self, path_segment, method="GET", headers=None, body="",
+ def request(self, path_segment, method="GET", headers=None, body={},
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
@@ -769,9 +808,6 @@ def request(self, path_segment, method="GET", headers=None, body="",
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
- :param query: All other keyword arguments, which are used as query
- parameters.
- :type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
@@ -800,13 +836,28 @@ def request(self, path_segment, method="GET", headers=None, body="",
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
- all_headers = headers + self._auth_headers
- logging.debug("%s request to %s (headers: %s, body: %s)",
+
+ all_headers = headers + self.additional_headers + self._auth_headers
+ logger.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
- response = self.http.request(path,
- {'method': method,
- 'headers': all_headers,
- 'body': body})
+
+ if body:
+ body = _encode(**body)
+
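+ # GET requests cannot carry a form-encoded body, so the encoded parameters
+ # are appended to the URL's query string instead.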
+ if method == "GET":
+ path = path + UrlEncoded('?' + body, skip_encode=True)
+ message = {'method': method,
+ 'headers': all_headers}
+ else:
+ message = {'method': method,
+ 'headers': all_headers,
+ 'body': body}
+ else:
+ message = {'method': method,
+ 'headers': all_headers}
+
+ response = self.http.request(path, message)
+
return response
def login(self):
@@ -848,12 +899,17 @@ def login(self):
# as credentials were passed in.
return
+ if self.bearerToken:
+ # Bearer auth mode requested, so this method is a nop as long
+ # as authentication token was passed in.
+ return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
self.authority + self._abspath("/services/auth/login"),
username=self.username,
password=self.password,
+ headers=self.additional_headers,
cookie="1") # In Splunk 6.2+, passing "cookie=1" will return the "set-cookie" header
body = response.body.read()
@@ -964,6 +1020,8 @@ def connect(**kwargs):
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
+ :param headers: List of extra HTTP headers to send (optional).
+ :type headers: ``list`` of 2-tuples.
:param autologin: When ``True``, automatically tries to log in again if the
session terminates.
:type autologin: ``Boolean``
@@ -1011,7 +1069,7 @@ class AuthenticationError(HTTPError):
def __init__(self, message, cause):
# Put the body back in the response so that HTTPError's constructor can
# read it again.
- cause._response.body = StringIO(cause.body)
+ cause._response.body = BytesIO(cause.body)
HTTPError.__init__(self, cause._response, message)
@@ -1037,27 +1095,28 @@ def __init__(self, message, cause):
#
# Encode the given kwargs as a query string. This wrapper will also _encode
-# a list value as a sequence of assignemnts to the corresponding arg name,
+# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
items = []
- for key, value in kwargs.iteritems():
+ for key, value in six.iteritems(kwargs):
if isinstance(value, list):
items.extend([(key, item) for item in value])
else:
items.append((key, value))
- return urllib.urlencode(items)
+ return urllib.parse.urlencode(items)
# Crack the given url into (scheme, host, port, path)
def _spliturl(url):
- scheme, opaque = urllib.splittype(url)
- netloc, path = urllib.splithost(opaque)
- host, port = urllib.splitport(netloc)
+ parsed_url = urllib.parse.urlparse(url)
+ host = parsed_url.hostname
+ port = parsed_url.port
+ path = '?'.join((parsed_url.path, parsed_url.query)) if parsed_url.query else parsed_url.path
# Strip brackets if its an IPv6 address
if host.startswith('[') and host.endswith(']'): host = host[1:-1]
if port is None: port = DEFAULT_PORT
- return scheme, host, port, path
+ return parsed_url.scheme, host, port, path
# Given an HTTP request handler, this wrapper objects provides a related
# family of convenience methods built using that handler.
@@ -1100,10 +1159,17 @@ class HttpLib(object):
The response dictionary is returned directly by ``HttpLib``'s methods with
no further processing. By default, ``HttpLib`` calls the :func:`handler` function
to get a handler function.
+
+ If using the default handler, SSL verification can be disabled by passing verify=False.
"""
- def __init__(self, custom_handler=None):
- self.handler = handler() if custom_handler is None else custom_handler
+ def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None, context=None, retries=0, retryDelay=10):
+ if custom_handler is None:
+ self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file, context=context)
+ else:
+ self.handler = custom_handler
self._cookies = {}
+ self.retries = retries
+ self.retryDelay = retryDelay
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
@@ -1183,16 +1249,18 @@ def post(self, url, headers=None, **kwargs):
# to support the receivers/stream endpoint.
if 'body' in kwargs:
# We only use application/x-www-form-urlencoded if there is no other
- # Content-Type header present. This can happen in cases where we
+ # Content-Type header present. This can happen in cases where we
# send requests as application/json, e.g. for KV Store.
- if len(filter(lambda x: x[0].lower() == "content-type", headers)) == 0:
+ if len([x for x in headers if x[0].lower() == "content-type"]) == 0:
headers.append(("Content-Type", "application/x-www-form-urlencoded"))
body = kwargs.pop('body')
+ if isinstance(body, dict):
+ body = _encode(**body).encode('utf-8')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
- body = _encode(**kwargs)
+ body = _encode(**kwargs).encode('utf-8')
message = {
'method': "POST",
'headers': headers,
@@ -1215,7 +1283,16 @@ def request(self, url, message, **kwargs):
its structure).
:rtype: ``dict``
"""
- response = self.handler(url, message, **kwargs)
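+ # Retry failed requests up to self.retries times, sleeping self.retryDelay
+ # seconds between attempts before giving up and re-raising the last error.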
+ while True:
+ try:
+ response = self.handler(url, message, **kwargs)
+ break
+ except Exception:
+ if self.retries <= 0:
+ raise
+ else:
+ time.sleep(self.retryDelay)
+ self.retries -= 1
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
@@ -1226,7 +1303,7 @@ def request(self, url, message, **kwargs):
# If response.headers is a dict, get the key-value pairs as 2-tuples
# this is the case when using urllib2
if isinstance(response.headers, dict):
- key_value_tuples = response.headers.items()
+ key_value_tuples = list(response.headers.items())
for key, value in key_value_tuples:
if key.lower() == "set-cookie":
_parse_cookies(value, self._cookies)
@@ -1248,15 +1325,18 @@ class ResponseReader(io.RawIOBase):
def __init__(self, response, connection=None):
self._response = response
self._connection = connection
- self._buffer = ''
+ self._buffer = b''
def __str__(self):
- return self.read()
+ if six.PY2:
+ return self.read()
+ else:
+ return str(self.read(), 'UTF-8')
@property
def empty(self):
"""Indicates whether there is any more data in the response."""
- return self.peek(1) == ""
+ return self.peek(1) == b""
def peek(self, size):
"""Nondestructively retrieves a given number of characters.
@@ -1273,8 +1353,8 @@ def peek(self, size):
def close(self):
"""Closes this response."""
- if _connection:
- _connection.close()
+ if self._connection:
+ self._connection.close()
self._response.close()
def read(self, size = None):
@@ -1286,7 +1366,7 @@ def read(self, size = None):
"""
r = self._buffer
- self._buffer = ''
+ self._buffer = b''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
@@ -1310,7 +1390,7 @@ def readinto(self, byte_array):
return bytes_read
-def handler(key_file=None, cert_file=None, timeout=None):
+def handler(key_file=None, cert_file=None, timeout=None, verify=False, context=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
@@ -1320,21 +1400,28 @@ def handler(key_file=None, cert_file=None, timeout=None):
:type cert_file: ``string``
:param `timeout`: The request time-out period, in seconds (optional).
:type timeout: ``integer`` or "None"
+ :param `verify`: Set to False to disable SSL verification on https connections.
+ :type verify: ``Boolean``
+ :param `context`: The SSLContext that is used with the HTTPSConnection when verify=True is enabled and a context is specified
+ :type context: ``SSLContext``
"""
def connect(scheme, host, port):
kwargs = {}
if timeout is not None: kwargs['timeout'] = timeout
if scheme == "http":
- return httplib.HTTPConnection(host, port, **kwargs)
+ return six.moves.http_client.HTTPConnection(host, port, **kwargs)
if scheme == "https":
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
- # If running Python 2.7.9+, disable SSL certificate validation
- if sys.version_info >= (2,7,9) and key_file is None and cert_file is None:
+ if not verify:
kwargs['context'] = ssl._create_unverified_context()
- return httplib.HTTPSConnection(host, port, **kwargs)
+ elif context:
+ # verify is True in elif branch and context is not None
+ kwargs['context'] = context
+
+ return six.moves.http_client.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
def request(url, message, **kwargs):
@@ -1343,7 +1430,7 @@ def request(url, message, **kwargs):
head = {
"Content-Length": str(len(body)),
"Host": host,
- "User-Agent": "splunk-sdk-python/1.6.0",
+ "User-Agent": "splunk-sdk-python/1.6.20",
"Accept": "*/*",
"Connection": "Close",
} # defaults
diff --git a/bin/ta_dmarc/splunklib/client.py b/bin/ta_dmarc/splunklib/client.py
index 982fd41..35d9e4f 100644
--- a/bin/ta_dmarc/splunklib/client.py
+++ b/bin/ta_dmarc/splunklib/client.py
@@ -58,18 +58,24 @@
my_app.package() # Creates a compressed package of this application
"""
+import contextlib
import datetime
import json
-import urllib
import logging
-from time import sleep
-from datetime import datetime, timedelta
import socket
-import contextlib
+from datetime import datetime, timedelta
+from time import sleep
+
+from splunklib import six
+from splunklib.six.moves import urllib
-from .binding import Context, HTTPError, AuthenticationError, namespace, UrlEncoded, _encode, _make_cookie_header
-from .data import record
from . import data
+from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded,
+ _encode, _make_cookie_header, _NoAuthenticationToken,
+ namespace)
+from .data import record
+
+logger = logging.getLogger(__name__)
__all__ = [
"connect",
@@ -100,8 +106,8 @@
PATH_SAVED_SEARCHES = "saved/searches/"
PATH_STANZA = "configs/conf-%s/%s" # (file, stanza)
PATH_USERS = "authentication/users/"
-PATH_RECEIVERS_STREAM = "receivers/stream"
-PATH_RECEIVERS_SIMPLE = "receivers/simple"
+PATH_RECEIVERS_STREAM = "/services/receivers/stream"
+PATH_RECEIVERS_SIMPLE = "/services/receivers/simple"
PATH_STORAGE_PASSWORDS = "storage/passwords"
XNAMEF_ATOM = "{http://www.w3.org/2005/Atom}%s"
@@ -182,7 +188,7 @@ def _trailing(template, *targets):
def _filter_content(content, *args):
if len(args) > 0:
return record((k, content[k]) for k in args)
- return record((k, v) for k, v in content.iteritems()
+ return record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes', 'type'])
# Construct a resource path from the given base path + resource name
@@ -192,8 +198,11 @@ def _path(base, name):
# Load an atom record from the body of the given response
+# this will ultimately be sent to an xml ElementTree so we
+# should use the xmlcharrefreplace option
def _load_atom(response, match=None):
- return data.load(response.body.read(), match)
+ return data.load(response.body.read()
+ .decode('utf-8', 'xmlcharrefreplace'), match)
# Load an array of atom entries from the body of the given response
@@ -217,7 +226,10 @@ def _load_atom_entries(response):
# Load the sid from the body of the given response
-def _load_sid(response):
+def _load_sid(response, output_mode):
+ if output_mode == "json":
+ json_obj = json.loads(response.body.read())
+ return json_obj.get('sid')
return _load_atom(response).response.sid
@@ -236,7 +248,7 @@ def _parse_atom_entry(entry):
metadata = _parse_atom_metadata(content)
# Filter some of the noise out of the content record
- content = record((k, v) for k, v in content.iteritems()
+ content = record((k, v) for k, v in six.iteritems(content)
if k not in ['eai:acl', 'eai:attributes'])
if 'type' in content:
@@ -288,6 +300,9 @@ def connect(**kwargs):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+ :param verify: Enable (True) or disable (False) SSL verification for
+ https connections. (optional, the default is True)
+ :type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional).
:type owner: ``string``
:param `app`: The app context of the namespace (optional).
@@ -308,6 +323,13 @@ def connect(**kwargs):
:type username: ``string``
:param `password`: The password for the Splunk account.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
+ :param `context`: The SSLContext that can be used when setting verify=True (optional)
+ :type context: ``SSLContext``
:return: An initialized :class:`Service` connection.
**Example**::
@@ -355,6 +377,9 @@ class Service(_BaseService):
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
+ :param verify: Enable (True) or disable (False) SSL verification for
+ https connections. (optional, the default is True)
+ :type verify: ``Boolean``
:param `owner`: The owner context of the namespace (optional; use "-" for wildcard).
:type owner: ``string``
:param `app`: The app context of the namespace (optional; use "-" for wildcard).
@@ -371,6 +396,11 @@ class Service(_BaseService):
:param `password`: The password, which is used to authenticate the Splunk
instance.
:type password: ``string``
+ :param retries: Number of retries for each HTTP connection (optional, the default is 0).
+ NOTE THAT THIS MAY INCREASE THE NUMBER OF ROUND TRIP CONNECTIONS TO THE SPLUNK SERVER.
+ :type retries: ``int``
+ :param retryDelay: How long to wait between connection attempts if `retries` > 0 (optional, defaults to 10s).
+ :type retryDelay: ``int`` (in seconds)
:return: A :class:`Service` instance.
**Example**::
@@ -388,6 +418,7 @@ class Service(_BaseService):
def __init__(self, **kwargs):
super(Service, self).__init__(**kwargs)
self._splunk_version = None
+ self._kvstore_owner = None
@property
def apps(self):
@@ -450,6 +481,13 @@ def info(self):
response = self.get("/services/server/info")
return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT))
+ def input(self, path, kind=None):
+ """Retrieves an input by path, and optionally kind.
+
+ :return: A :class:`Input` object.
+ """
+ return Input(self, path, kind=kind).refresh()
+
@property
def inputs(self):
"""Returns the collection of inputs configured on this Splunk instance.
@@ -550,7 +588,7 @@ def restart(self, timeout=None):
# This message will be deleted once the server actually restarts.
self.messages.create(name="restart_required", **msg)
result = self.post("/services/server/control/restart")
- if timeout is None:
+ if timeout is None:
return result
start = datetime.now()
diff = timedelta(seconds=timeout)
@@ -559,9 +597,9 @@ def restart(self, timeout=None):
self.login()
if not self.restart_required:
return result
- except Exception, e:
+ except Exception as e:
sleep(1)
- raise Exception, "Operation time out."
+ raise Exception("Operation time out.")
@property
def restart_required(self):
@@ -653,12 +691,34 @@ def splunk_version(self):
self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')])
return self._splunk_version
+ @property
+ def kvstore_owner(self):
+ """Returns the KVStore owner for this instance of Splunk.
+
+ By default, if the kvstore owner is not set, it will return "nobody".
+ :return: A string with the KVStore owner.
+ """
+ if self._kvstore_owner is None:
+ self._kvstore_owner = "nobody"
+ return self._kvstore_owner
+
+ @kvstore_owner.setter
+ def kvstore_owner(self, value):
+ """
+ The kvstore is refreshed when the owner value is changed.
+ """
+ self._kvstore_owner = value
+ self.kvstore
+
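+ # A minimal sketch of scoping KV Store access to an owner (hypothetical values):
+ #   service.kvstore_owner = "nobody"
+ #   collections = service.kvstore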
@property
def kvstore(self):
"""Returns the collection of KV Store collections.
+ Sets the owner for the namespace before retrieving the KVStore Collection.
+
:return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities.
"""
+ self.namespace['owner'] = self.kvstore_owner
return KVStoreCollections(self)
@property
@@ -679,7 +739,7 @@ class Endpoint(object):
"""
def __init__(self, service, path):
self.service = service
- self.path = path if path.endswith('/') else path + '/'
+ self.path = path
def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
"""Performs a GET operation on the path segment relative to this endpoint.
@@ -737,6 +797,8 @@ def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
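+ # Append the trailing slash lazily, only when a relative, non-empty path
+ # segment is being joined onto this endpoint's path.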
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner,
app=app, sharing=sharing)
# ^-- This was "%s%s" % (self.path, path_segment).
@@ -797,6 +859,8 @@ def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
if path_segment.startswith('/'):
path = path_segment
else:
+ if not self.path.endswith('/') and path_segment != "":
+ self.path = self.path + '/'
path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing)
return self.service.post(path, owner=owner, app=app, sharing=sharing, **query)
@@ -808,35 +872,24 @@ class Entity(Endpoint):
``Entity`` provides the majority of functionality required by entities.
Subclasses only implement the special cases for individual entities.
- For example for deployment serverclasses, the subclass makes whitelists and
- blacklists into Python lists.
+ For example for saved searches, the subclass makes fields like ``action.email``,
+ ``alert_type``, and ``search`` available.
An ``Entity`` is addressed like a dictionary, with a few extensions,
- so the following all work::
-
- ent['email.action']
- ent['disabled']
- ent['whitelist']
-
- Many endpoints have values that share a prefix, such as
- ``email.to``, ``email.action``, and ``email.subject``. You can extract
- the whole fields, or use the key ``email`` to get a dictionary of
- all the subelements. That is, ``ent['email']`` returns a
- dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
- there are multiple levels of dots, each level is made into a
- subdictionary, so ``email.body.salutation`` can be accessed at
- ``ent['email']['body']['salutation']`` or
- ``ent['email.body.salutation']``.
+ so the following all work, for example in saved searches::
+
+ ent['action.email']
+ ent['alert_type']
+ ent['search']
You can also access the fields as though they were the fields of a Python
object, as in::
- ent.email.action
- ent.disabled
- ent.whitelist
+ ent.alert_type
+ ent.search
However, because some of the field names are not valid Python identifiers,
- the dictionary-like syntax is preferrable.
+ the dictionary-like syntax is preferable.
The state of an :class:`Entity` object is cached, so accessing a field
does not contact the server. If you think the values on the
@@ -884,7 +937,7 @@ def __contains__(self, item):
try:
self[item]
return True
- except KeyError:
+ except (KeyError, AttributeError):
return False
def __eq__(self, other):
@@ -933,7 +986,10 @@ def __getitem__(self, key):
def _load_atom_entry(self, response):
elem = _load_atom(response, XNAME_ENTRY)
if isinstance(elem, list):
- raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
+ apps = [ele.entry.content.get('eai:appName') for ele in elem]
+
+ raise AmbiguousReferenceException(
+ "Fetch from server returned multiple entries for name '%s' in apps %s." % (elem[0].entry.title, apps))
else:
return elem.entry
@@ -1039,8 +1095,6 @@ def content(self):
def disable(self):
"""Disables the entity at this endpoint."""
self.post("disable")
- if self.service.restart_required:
- self.service.restart(120)
return self
def enable(self):
@@ -1081,7 +1135,7 @@ def read(self, response):
# text to be dispatched via HTTP. However, these links are already
# URL encoded when they arrive, and we need to mark them as such.
unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
- for k,v in results['links'].iteritems()])
+ for k,v in six.iteritems(results['links'])])
results['links'] = unquoted_links
return results
@@ -1187,7 +1241,7 @@ def __getitem__(self, key):
:raises ValueError: Raised if no namespace is specified and *key*
does not refer to a unique name.
- *Example*::
+ **Example**::
s = client.connect(...)
saved_searches = s.saved_searches
@@ -1290,7 +1344,7 @@ def _entity_path(self, state):
# This has been factored out so that it can be easily
# overloaded by Configurations, which has to switch its
# entities' endpoints from its own properties/ to configs/.
- raw_path = urllib.unquote(state.links.alternate)
+ raw_path = urllib.parse.unquote(state.links.alternate)
if 'servicesNS/' in raw_path:
return _trailing(raw_path, 'servicesNS/', '/', '/')
elif 'services/' in raw_path:
@@ -1424,7 +1478,7 @@ def iter(self, offset=0, count=None, pagesize=None, **kwargs):
if pagesize is None or N < pagesize:
break
offset += N
- logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
+ logger.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)
# kwargs: count, offset, search, sort_dir, sort_key, sort_mode
def list(self, count=None, **kwargs):
@@ -1534,7 +1588,7 @@ def create(self, name, **params):
applications = s.apps
new_app = applications.create("my_fake_app")
"""
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise InvalidNameException("%s is not a valid name for an entity." % name)
if 'namespace' in params:
namespace = params.pop('namespace')
@@ -1623,9 +1677,9 @@ def get(self, name="", owner=None, app=None, sharing=None, **query):
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
- Example:
-
- import splunklib.client
+ **Example**::
+
+ import splunklib.client
s = client.service(...)
saved_searches = s.saved_searches
saved_searches.get("my/saved/search") == \\
@@ -1678,7 +1732,7 @@ def __getitem__(self, key):
# The superclass implementation is designed for collections that contain
# entities. This collection (Configurations) contains collections
# (ConfigurationFile).
- #
+ #
# The configurations endpoint returns multiple entities when we ask for a single file.
# This screws up the default implementation of __getitem__ from Collection, which thinks
# that multiple entities means a name collision, so we have to override it here.
@@ -1717,7 +1771,7 @@ def create(self, name):
# This has to be overridden to handle the plumbing of creating
# a ConfigurationFile (which is a Collection) instead of some
# Entity.
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise ValueError("Invalid name: %s" % repr(name))
response = self.post(__conf=name)
if response.status == 303:
@@ -1742,9 +1796,9 @@ class Stanza(Entity):
"""This class contains a single configuration stanza."""
def submit(self, stanza):
- """Adds keys to the current configuration stanza as a
+ """Adds keys to the current configuration stanza as a
dictionary of key-value pairs.
-
+
:param stanza: A dictionary of key-value pairs for the stanza.
:type stanza: ``dict``
:return: The :class:`Stanza` object.
@@ -1811,7 +1865,7 @@ def create(self, password, username, realm=None):
:return: The :class:`StoragePassword` object created.
"""
- if not isinstance(username, basestring):
+ if not isinstance(username, six.string_types):
raise ValueError("Invalid name: %s" % repr(username))
if realm is None:
@@ -1852,7 +1906,7 @@ def delete(self, username, realm=None):
name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True)
# Append the : expected at the end of the name
- if name[-1] is not ":":
+ if name[-1] != ":":
name = name + ":"
return Collection.delete(self, name)
@@ -1935,9 +1989,11 @@ def attach(self, host=None, source=None, sourcetype=None):
if host is not None: args['host'] = host
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.urlencode(args), skip_encode=True)
+ path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.parse.urlencode(args), skip_encode=True)
- cookie_or_auth_header = "Authorization: %s\r\n" % self.service.token
+ cookie_or_auth_header = "Authorization: Splunk %s\r\n" % \
+ (self.service.token if self.service.token is _NoAuthenticationToken
+ else self.service.token.replace("Splunk ", ""))
# If we have cookie(s), use them instead of "Authorization: ..."
if self.service.has_cookies():
@@ -1947,13 +2003,13 @@ def attach(self, host=None, source=None, sourcetype=None):
# the connection open and use the Splunk extension headers to note
# the input mode
sock = self.service.connect()
- headers = ["POST %s HTTP/1.1\r\n" % self.service._abspath(path),
- "Host: %s:%s\r\n" % (self.service.host, int(self.service.port)),
- "Accept-Encoding: identity\r\n",
- cookie_or_auth_header,
- "X-Splunk-Input-Mode: Streaming\r\n",
- "\r\n"]
-
+ headers = [("POST %s HTTP/1.1\r\n" % str(self.service._abspath(path))).encode('utf-8'),
+ ("Host: %s:%s\r\n" % (self.service.host, int(self.service.port))).encode('utf-8'),
+ b"Accept-Encoding: identity\r\n",
+ cookie_or_auth_header.encode('utf-8'),
+ b"X-Splunk-Input-Mode: Streaming\r\n",
+ b"\r\n"]
+
for h in headers:
sock.write(h)
return sock
@@ -2026,8 +2082,7 @@ def clean(self, timeout=60):
self.refresh()
if self.content.totalEventCount != '0':
- raise OperationError, "Cleaning index %s took longer than %s seconds; timing out." %\
- (self.name, timeout)
+ raise OperationError("Cleaning index %s took longer than %s seconds; timing out." % (self.name, timeout))
finally:
# Restore original values
self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp)
@@ -2065,10 +2120,6 @@ def submit(self, event, host=None, source=None, sourcetype=None):
if source is not None: args['source'] = source
if sourcetype is not None: args['sourcetype'] = sourcetype
- # The reason we use service.request directly rather than POST
- # is that we are not sending a POST request encoded using
- # x-www-form-urlencoded (as we do not have a key=value body),
- # because we aren't really sending a "form".
self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args)
return self
@@ -2295,7 +2346,7 @@ def create(self, name, kind, **kwargs):
path = _path(
self.path + kindpath,
'%s:%s' % (kwargs['restrictToHost'], name) \
- if kwargs.has_key('restrictToHost') else name
+ if 'restrictToHost' in kwargs else name
)
return Input(self.service, path, kind)
@@ -2430,15 +2481,12 @@ def kindpath(self, kind):
:return: The relative endpoint path.
:rtype: ``string``
"""
- if kind in self.kinds:
- return UrlEncoded(kind, skip_encode=True)
- # Special cases
- elif kind == 'tcp':
+ if kind == 'tcp':
return UrlEncoded('tcp/raw', skip_encode=True)
elif kind == 'splunktcp':
return UrlEncoded('tcp/cooked', skip_encode=True)
else:
- raise ValueError("No such kind on server: %s" % kind)
+ return UrlEncoded(kind, skip_encode=True)
def list(self, *kinds, **kwargs):
"""Returns a list of inputs that are in the :class:`Inputs` collection.
@@ -2499,13 +2547,13 @@ def list(self, *kinds, **kwargs):
kinds = self.kinds
if len(kinds) == 1:
kind = kinds[0]
- logging.debug("Inputs.list taking short circuit branch for single kind.")
+ logger.debug("Inputs.list taking short circuit branch for single kind.")
path = self.kindpath(kind)
- logging.debug("Path for inputs: %s", path)
+ logger.debug("Path for inputs: %s", path)
try:
path = UrlEncoded(path, skip_encode=True)
response = self.get(path, **kwargs)
- except HTTPError, he:
+ except HTTPError as he:
if he.status == 404: # No inputs of this kind
return []
entities = []
@@ -2517,7 +2565,7 @@ def list(self, *kinds, **kwargs):
# Unquote the URL, since all URL encoded in the SDK
# should be of type UrlEncoded, and all str should not
# be URL encoded.
- path = urllib.unquote(state.links.alternate)
+ path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
return entities
@@ -2543,7 +2591,7 @@ def list(self, *kinds, **kwargs):
# Unquote the URL, since all URL encoded in the SDK
# should be of type UrlEncoded, and all str should not
# be URL encoded.
- path = urllib.unquote(state.links.alternate)
+ path = urllib.parse.unquote(state.links.alternate)
entity = Input(self.service, path, kind, state=state)
entities.append(entity)
if 'offset' in kwargs:
@@ -2719,9 +2767,8 @@ def pause(self):
return self
def results(self, **query_params):
- """Returns a streaming handle to this job's search results. To get a
- nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`,
- as in::
+ """Returns a streaming handle to this job's search results. To get a nice, Pythonic iterator, pass the handle
+ to :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'", as in::
import splunklib.client as client
import splunklib.results as results
@@ -2730,7 +2777,7 @@ def results(self, **query_params):
job = service.jobs.create("search * | head 5")
while not job.is_done():
sleep(.2)
- rr = results.ResultsReader(job.results())
+ rr = results.JSONResultsReader(job.results(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2760,19 +2807,17 @@ def results(self, **query_params):
def preview(self, **query_params):
"""Returns a streaming handle to this job's preview search results.
- Unlike :class:`splunklib.results.ResultsReader`, which requires a job to
- be finished to
- return any results, the ``preview`` method returns any results that have
- been generated so far, whether the job is running or not. The
- returned search results are the raw data from the server. Pass
- the handle returned to :class:`splunklib.results.ResultsReader` to get a
- nice, Pythonic iterator over objects, as in::
+ Unlike :class:`splunklib.results.JSONResultsReader` along with the query param "output_mode='json'",
+ which requires a job to be finished to return any results, the ``preview`` method returns any results that
+ have been generated so far, whether the job is running or not. The returned search results are the raw data
+ from the server. Pass the handle returned to :class:`splunklib.results.JSONResultsReader` to get a nice,
+ Pythonic iterator over objects, as in::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
job = service.jobs.create("search * | head 5")
- rr = results.ResultsReader(job.preview())
+ rr = results.JSONResultsReader(job.preview(output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2923,19 +2968,19 @@ def create(self, query, **kwargs):
if kwargs.get("exec_mode", None) == "oneshot":
raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.")
response = self.post(search=query, **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
def export(self, query, **params):
- """Runs a search and immediately starts streaming preview events.
- This method returns a streaming handle to this job's events as an XML
- document from the server. To parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ """Runs a search and immediately starts streaming preview events. This method returns a streaming handle to
+ this job's events as an XML document from the server. To parse this stream into usable Python objects,
+ pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'"::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.export("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.export("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -2984,14 +3029,14 @@ def itemmeta(self):
def oneshot(self, query, **params):
"""Run a oneshot search and returns a streaming handle to the results.
- The ``InputStream`` object streams XML fragments from the server. To
- parse this stream into usable Python objects,
- pass the handle to :class:`splunklib.results.ResultsReader`::
+ The ``InputStream`` object streams fragments from the server. To parse this stream into usable Python
+ objects, pass the handle to :class:`splunklib.results.JSONResultsReader` along with the query param
+ "output_mode='json'" ::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
- rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
+ rr = results.JSONResultsReader(service.jobs.oneshot("search * | head 5",output_mode='json'))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
@@ -3139,7 +3184,7 @@ def dispatch(self, **kwargs):
:return: The :class:`Job`.
"""
response = self.post("dispatch", **kwargs)
- sid = _load_sid(response)
+ sid = _load_sid(response, kwargs.get("output_mode", None))
return Job(self.service, sid)
@property
@@ -3360,7 +3405,7 @@ def create(self, username, password, roles, **params):
boris = users.create("boris", "securepassword", roles="user")
hilda = users.create("hilda", "anotherpassword", roles=["user","power"])
"""
- if not isinstance(username, basestring):
+ if not isinstance(username, six.string_types):
raise ValueError("Invalid username: %s" % str(username))
username = username.lower()
self.post(name=username, password=password, roles=roles, **params)
@@ -3371,7 +3416,7 @@ def create(self, username, password, roles, **params):
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
- urllib.unquote(state.links.alternate),
+ urllib.parse.unquote(state.links.alternate),
state=state)
return entity
@@ -3483,7 +3528,7 @@ def create(self, name, **params):
roles = c.roles
paltry = roles.create("paltry", imported_roles="user", defaultApp="search")
"""
- if not isinstance(name, basestring):
+ if not isinstance(name, six.string_types):
raise ValueError("Invalid role name: %s" % str(name))
name = name.lower()
self.post(name=name, **params)
@@ -3494,7 +3539,7 @@ def create(self, name, **params):
state = _parse_atom_entry(entry)
entity = self.item(
self.service,
- urllib.unquote(state.links.alternate),
+ urllib.parse.unquote(state.links.alternate),
state=state)
return entity
@@ -3545,11 +3590,11 @@ def create(self, name, indexes = {}, fields = {}, **kwargs):
:return: Result of POST request
"""
- for k, v in indexes.iteritems():
+ for k, v in six.iteritems(indexes):
if isinstance(v, dict):
v = json.dumps(v)
kwargs['index.' + k] = v
- for k, v in fields.iteritems():
+ for k, v in six.iteritems(fields):
kwargs['field.' + k] = v
return self.post(name=name, **kwargs)
@@ -3558,7 +3603,7 @@ class KVStoreCollection(Entity):
def data(self):
"""Returns data object for this Collection.
- :rtype: :class:`KVStoreData`
+ :rtype: :class:`KVStoreCollectionData`
"""
return KVStoreCollectionData(self)
@@ -3573,7 +3618,7 @@ def update_index(self, name, value):
:return: Result of POST request
"""
kwargs = {}
- kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value)
+ kwargs['index.' + name] = value if isinstance(value, six.string_types) else json.dumps(value)
return self.post(**kwargs)
def update_field(self, name, value):
@@ -3601,7 +3646,7 @@ def __init__(self, collection):
self.service = collection.service
self.collection = collection
self.owner, self.app, self.sharing = collection._proper_namespace()
- self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/'
+ self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name, encode_slash=True) + '/'
def _get(self, url, **kwargs):
return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs)
@@ -3622,7 +3667,12 @@ def query(self, **query):
:return: Array of documents retrieved by query.
:rtype: ``array``
"""
- return json.loads(self._get('', **query).body.read())
+
+ for key, value in query.items():
+ if isinstance(query[key], dict):
+ query[key] = json.dumps(value)
+
+ return json.loads(self._get('', **query).body.read().decode('utf-8'))
def query_by_id(self, id):
"""
@@ -3634,7 +3684,7 @@ def query_by_id(self, id):
:return: Document with id
:rtype: ``dict``
"""
- return json.loads(self._get(UrlEncoded(str(id))).body.read())
+ return json.loads(self._get(UrlEncoded(str(id), encode_slash=True)).body.read().decode('utf-8'))
def insert(self, data):
"""
@@ -3646,7 +3696,9 @@ def insert(self, data):
:return: _id of inserted object
:rtype: ``dict``
"""
- return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def delete(self, query=None):
"""
@@ -3668,7 +3720,7 @@ def delete_by_id(self, id):
:return: Result of DELETE request
"""
- return self._delete(UrlEncoded(str(id)))
+ return self._delete(UrlEncoded(str(id), encode_slash=True))
def update(self, id, data):
"""
@@ -3682,7 +3734,9 @@ def update(self, id, data):
:return: id of replaced document
:rtype: ``dict``
"""
- return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return json.loads(self._post(UrlEncoded(str(id), encode_slash=True), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_find(self, *dbqueries):
"""
@@ -3690,16 +3744,16 @@ def batch_find(self, *dbqueries):
:param dbqueries: Array of individual queries as dictionaries
:type dbqueries: ``array`` of ``dict``
-
+
:return: Results of each query
:rtype: ``array`` of ``array``
"""
- if len(dbqueries) < 1:
+ if len(dbqueries) < 1:
raise Exception('Must have at least one query.')
-
+
data = json.dumps(dbqueries)
- return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
def batch_save(self, *documents):
"""
@@ -3707,13 +3761,13 @@ def batch_save(self, *documents):
:param documents: Array of documents to save as dictionaries
:type documents: ``array`` of ``dict``
-
+
:return: Results of update operation as overall stats
:rtype: ``dict``
"""
- if len(documents) < 1:
+ if len(documents) < 1:
raise Exception('Must have at least one document.')
-
+
data = json.dumps(documents)
- return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read())
+ return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8'))
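As a rough usage sketch of the KVStoreCollectionData changes above (the collection name, credentials, and field values are illustrative assumptions, not part of this patch): dict payloads are now JSON-encoded before the POST, and response bodies are decoded from UTF-8 before json.loads().

    import splunklib.client as client

    service = client.connect(host="localhost", port=8089,
                             username="admin", password="changeme")
    collection = service.kvstore["dmarc_summary"]          # assumed collection name

    # insert() and update() now accept a plain dict and json.dumps() it themselves
    key = collection.data.insert({"domain": "example.com", "policy": "reject"})

    # dict-valued query parameters are serialized the same way, and the response
    # body is decoded from UTF-8 before being parsed
    rows = collection.data.query(query={"domain": "example.com"})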
diff --git a/bin/ta_dmarc/splunklib/data.py b/bin/ta_dmarc/splunklib/data.py
index 61431d9..f9ffb86 100644
--- a/bin/ta_dmarc/splunklib/data.py
+++ b/bin/ta_dmarc/splunklib/data.py
@@ -16,7 +16,10 @@
format, which is the format used by most of the REST API.
"""
+from __future__ import absolute_import
+import sys
from xml.etree.ElementTree import XML
+from splunklib import six
__all__ = ["load"]
@@ -74,6 +77,11 @@ def load(text, match=None):
'namespaces': [],
'names': {}
}
+
+ # Convert to unicode encoding in only python 2 for xml parser
+ if(sys.version_info < (3, 0, 0) and isinstance(text, unicode)):
+ text = text.encode('utf-8')
+
root = XML(text)
items = [root] if match is None else root.findall(match)
count = len(items)
@@ -88,7 +96,7 @@ def load(text, match=None):
def load_attrs(element):
if not hasattrs(element): return None
attrs = record()
- for key, value in element.attrib.iteritems():
+ for key, value in six.iteritems(element.attrib):
attrs[key] = value
return attrs
@@ -110,12 +118,12 @@ def load_elem(element, nametable=None):
if attrs is None: return name, value
if value is None: return name, attrs
# If value is simple, merge into attrs dict using special key
- if isinstance(value, str):
+ if isinstance(value, six.string_types):
attrs["$text"] = value
return name, attrs
# Both attrs & value are complex, so merge the two dicts, resolving collisions.
collision_keys = []
- for key, val in attrs.iteritems():
+ for key, val in six.iteritems(attrs):
if key in value and key in collision_keys:
value[key].append(val)
elif key in value and key not in collision_keys:
@@ -153,8 +161,8 @@ def load_value(element, nametable=None):
text = element.text
if text is None:
return None
- text = text.strip()
- if len(text) == 0:
+
+ if len(text.strip()) == 0:
return None
return text
@@ -169,7 +177,7 @@ def load_value(element, nametable=None):
for child in children:
name, item = load_elem(child, nametable)
# If we have seen this name before, promote the value to a list
- if value.has_key(name):
+ if name in value:
current = value[name]
if not isinstance(current, list):
value[name] = [current]
@@ -227,7 +235,7 @@ def __getitem__(self, key):
return dict.__getitem__(self, key)
key += self.sep
result = record()
- for k,v in self.iteritems():
+ for k,v in six.iteritems(self):
if not k.startswith(key):
continue
suffix = k[len(key):]
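A small sketch of what splunklib.data.load() does after these changes (the XML fragment is made up; on Python 2 a unicode input is now encoded to UTF-8 before it reaches the ElementTree parser):

    from splunklib import data

    doc = data.load("<e><a1>v1</a1><a1>v2</a1></e>")
    # repeated element names are promoted to a list on the resulting record
    assert doc.e.a1 == ["v1", "v2"]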
diff --git a/bin/ta_dmarc/splunklib/modularinput/argument.py b/bin/ta_dmarc/splunklib/modularinput/argument.py
index fed7bed..04214d1 100644
--- a/bin/ta_dmarc/splunklib/modularinput/argument.py
+++ b/bin/ta_dmarc/splunklib/modularinput/argument.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.ElementTree as ET
except ImportError:
@@ -53,9 +54,9 @@ def __init__(self, name, description=None, validation=None,
:param name: ``string``, identifier for this argument in Splunk.
:param description: ``string``, human-readable description of the argument.
:param validation: ``string`` specifying how the argument should be validated, if using internal validation.
- If using external validation, this will be ignored.
+ If using external validation, this will be ignored.
:param data_type: ``string``, data type of this field; use the class constants.
- "data_type_boolean", "data_type_number", or "data_type_string".
+ "data_type_boolean", "data_type_number", or "data_type_string".
:param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind.
:param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind.
:param title: ``String``, a human-readable title for the argument.
diff --git a/bin/ta_dmarc/splunklib/modularinput/event.py b/bin/ta_dmarc/splunklib/modularinput/event.py
index de1d4f1..9cd6cf3 100644
--- a/bin/ta_dmarc/splunklib/modularinput/event.py
+++ b/bin/ta_dmarc/splunklib/modularinput/event.py
@@ -12,6 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
+from io import TextIOBase
+from splunklib.six import ensure_text
+
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -103,5 +107,8 @@ def write_to(self, stream):
if self.done:
ET.SubElement(event, "done")
- stream.write(ET.tostring(event))
+ if isinstance(stream, TextIOBase):
+ stream.write(ensure_text(ET.tostring(event)))
+ else:
+ stream.write(ET.tostring(event))
stream.flush()
\ No newline at end of file
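A minimal sketch of the new Event.write_to() behaviour (io.StringIO/io.BytesIO stand in for the real output streams): text streams now receive str via ensure_text(), while byte streams keep receiving the raw ET.tostring() bytes.

    import io
    from splunklib.modularinput.event import Event

    event = Event(data="sample dmarc record", stanza="dmarc://example", sourcetype="dmarc")

    text_out = io.StringIO()        # TextIOBase subclass -> gets ensure_text() output
    event.write_to(text_out)

    byte_out = io.BytesIO()         # anything else -> gets the raw bytes
    event.write_to(byte_out)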
diff --git a/bin/ta_dmarc/splunklib/modularinput/event_writer.py b/bin/ta_dmarc/splunklib/modularinput/event_writer.py
old mode 100644
new mode 100755
index 418405f..5f8c5aa
--- a/bin/ta_dmarc/splunklib/modularinput/event_writer.py
+++ b/bin/ta_dmarc/splunklib/modularinput/event_writer.py
@@ -12,18 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
import sys
+from splunklib.six import ensure_str
from .event import ET
try:
- from cStringIO import StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except ImportError:
- from StringIO import StringIO
+ from splunklib.six import StringIO
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
-
Its two important methods are ``writeEvent``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
@@ -76,9 +77,11 @@ def write_xml_document(self, document):
:param document: An ``ElementTree`` object.
"""
- self._out.write(ET.tostring(document))
+ self._out.write(ensure_str(ET.tostring(document)))
self._out.flush()
def close(self):
"""Write the closing tag to make this XML well formed."""
- self._out.write("")
\ No newline at end of file
+ if self.header_written:
+ self._out.write("")
+ self._out.flush()
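Roughly how the reworked EventWriter is driven (purely illustrative; in a real modular input the Script base class does this for you): log() is what run_script() now uses for unexpected exceptions, and close() only emits the closing tag when an opening one was written.

    import sys
    from splunklib.modularinput.event import Event
    from splunklib.modularinput.event_writer import EventWriter

    writer = EventWriter(output=sys.stdout, error=sys.stderr)
    writer.write_event(Event(data="sample event", sourcetype="dmarc"))
    writer.log(EventWriter.ERROR, "something went wrong")
    writer.close()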
diff --git a/bin/ta_dmarc/splunklib/modularinput/input_definition.py b/bin/ta_dmarc/splunklib/modularinput/input_definition.py
index 3a2e1fa..fdc7cbb 100644
--- a/bin/ta_dmarc/splunklib/modularinput/input_definition.py
+++ b/bin/ta_dmarc/splunklib/modularinput/input_definition.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
diff --git a/bin/ta_dmarc/splunklib/modularinput/scheme.py b/bin/ta_dmarc/splunklib/modularinput/scheme.py
index c3aa812..4104e4a 100644
--- a/bin/ta_dmarc/splunklib/modularinput/scheme.py
+++ b/bin/ta_dmarc/splunklib/modularinput/scheme.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError:
@@ -54,7 +55,7 @@ def add_argument(self, arg):
def to_xml(self):
"""Creates an ``ET.Element`` representing self, then returns it.
- :returns root, an ``ET.Element`` representing this scheme.
+ :returns: an ``ET.Element`` representing this scheme.
"""
root = ET.Element("scheme")
diff --git a/bin/ta_dmarc/splunklib/modularinput/script.py b/bin/ta_dmarc/splunklib/modularinput/script.py
index dddca8a..8595dc4 100644
--- a/bin/ta_dmarc/splunklib/modularinput/script.py
+++ b/bin/ta_dmarc/splunklib/modularinput/script.py
@@ -12,14 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
-from urlparse import urlsplit
+from splunklib.six.moves.urllib.parse import urlsplit
import sys
from ..client import Service
from .event_writer import EventWriter
from .input_definition import InputDefinition
from .validation_definition import ValidationDefinition
+from splunklib import six
try:
import xml.etree.cElementTree as ET
@@ -27,7 +29,7 @@
import xml.etree.ElementTree as ET
-class Script(object):
+class Script(six.with_metaclass(ABCMeta, object)):
"""An abstract base class for implementing modular inputs.
Subclasses should override ``get_scheme``, ``stream_events``,
@@ -37,7 +39,6 @@ class Script(object):
The ``run`` function is used to run modular inputs; it typically should
not be overridden.
"""
- __metaclass__ = ABCMeta
def __init__(self):
self._input_definition = None
@@ -101,10 +102,10 @@ def run_script(self, args, event_writer, input_stream):
err_string = "ERROR Invalid arguments to modular input script:" + ' '.join(
args)
event_writer._err.write(err_string)
+ return 1
except Exception as e:
- err_string = EventWriter.ERROR + str(e.message)
- event_writer._err.write(err_string)
+ event_writer.log(EventWriter.ERROR, str(e))
return 1
@property
@@ -116,9 +117,9 @@ def service(self):
available as soon as the :code:`Script.stream_events` method is
called.
- :return: :class:splunklib.client.Service. A value of None is returned,
- if you call this method before the :code:`Script.stream_events` method
- is called.
+ :return: :class:`splunklib.client.Service`. A value of None is returned,
+ if you call this method before the :code:`Script.stream_events` method
+ is called.
"""
if self._service is not None:
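For context on the Script changes above, a skeleton modular input as it would look against this version of the SDK (the scheme name and event contents are assumptions): the ABC is now applied through six.with_metaclass instead of a __metaclass__ attribute.

    import sys
    from splunklib.modularinput.script import Script
    from splunklib.modularinput.scheme import Scheme
    from splunklib.modularinput.event import Event

    class DemoInput(Script):
        def get_scheme(self):
            scheme = Scheme("demo_input")
            scheme.description = "Demonstration modular input"
            return scheme

        def stream_events(self, inputs, ew):
            for input_name in inputs.inputs:
                ew.write_event(Event(data="checked %s" % input_name, stanza=input_name))

    if __name__ == "__main__":
        sys.exit(DemoInput().run(sys.argv))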
diff --git a/bin/ta_dmarc/splunklib/modularinput/utils.py b/bin/ta_dmarc/splunklib/modularinput/utils.py
index f9de82f..3d42b63 100644
--- a/bin/ta_dmarc/splunklib/modularinput/utils.py
+++ b/bin/ta_dmarc/splunklib/modularinput/utils.py
@@ -14,6 +14,8 @@
# File for utility functions
+from __future__ import absolute_import
+from splunklib.six.moves import zip
def xml_compare(expected, found):
"""Checks equality of two ``ElementTree`` objects.
@@ -62,11 +64,14 @@ def parse_parameters(param_node):
def parse_xml_data(parent_node, child_node_tag):
data = {}
for child in parent_node:
+ child_name = child.get("name")
if child.tag == child_node_tag:
if child_node_tag == "stanza":
- data[child.get("name")] = {}
+ data[child_name] = {
+ "__app": child.get("app", None)
+ }
for param in child:
- data[child.get("name")][param.get("name")] = parse_parameters(param)
+ data[child_name][param.get("name")] = parse_parameters(param)
elif "item" == parent_node.tag:
- data[child.get("name")] = parse_parameters(child)
- return data
\ No newline at end of file
+ data[child_name] = parse_parameters(child)
+ return data
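A sketch of what the updated parse_xml_data() now returns (the stanza and app names are invented): each stanza dictionary additionally records which app defined it under the "__app" key.

    import xml.etree.ElementTree as ET
    from splunklib.modularinput.utils import parse_xml_data

    conf = ET.fromstring(
        '<configuration>'
        '<stanza name="dmarc://example" app="TA-dmarc">'
        '<param name="interval">300</param>'
        '</stanza>'
        '</configuration>')

    data = parse_xml_data(conf, "stanza")
    # {'dmarc://example': {'__app': 'TA-dmarc', 'interval': '300'}}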
diff --git a/bin/ta_dmarc/splunklib/modularinput/validation_definition.py b/bin/ta_dmarc/splunklib/modularinput/validation_definition.py
index 72f8e7b..3bbe976 100644
--- a/bin/ta_dmarc/splunklib/modularinput/validation_definition.py
+++ b/bin/ta_dmarc/splunklib/modularinput/validation_definition.py
@@ -13,6 +13,7 @@
# under the License.
+from __future__ import absolute_import
try:
import xml.etree.cElementTree as ET
except ImportError as ie:
@@ -27,7 +28,7 @@ class ValidationDefinition(object):
**Example**::
- ``v = ValidationDefinition()``
+ v = ValidationDefinition()
"""
def __init__(self):
@@ -45,23 +46,25 @@ def parse(stream):
The XML typically will look like this:
- ``<items>``
- ``  <server_host>myHost</server_host>``
- ``  <server_uri>https://127.0.0.1:8089</server_uri>``
- ``  <session_key>123102983109283019283</session_key>``
- ``  <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>``
- ``  <item name="myScheme">``
- ``    <param name="param1">value1</param>``
- ``    <param_list name="param2">``
- ``      <value>value2</value>``
- ``      <value>value3</value>``
- ``      <value>value4</value>``
- ``    </param_list>``
- ``  </item>``
- ``</items>``
+ .. code-block:: xml
+
+     <items>
+         <server_host>myHost</server_host>
+         <server_uri>https://127.0.0.1:8089</server_uri>
+         <session_key>123102983109283019283</session_key>
+         <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
+         <item name="myScheme">
+             <param name="param1">value1</param>
+             <param_list name="param2">
+                 <value>value2</value>
+                 <value>value3</value>
+                 <value>value4</value>
+             </param_list>
+         </item>
+     </items>
+
:param stream: ``Stream`` containing XML to parse.
- :return definition: A ``ValidationDefinition`` object.
+ :return: A ``ValidationDefinition`` object.
"""
diff --git a/bin/ta_dmarc/splunklib/results.py b/bin/ta_dmarc/splunklib/results.py
index ffc9b0b..8543ab0 100644
--- a/bin/ta_dmarc/splunklib/results.py
+++ b/bin/ta_dmarc/splunklib/results.py
@@ -23,7 +23,7 @@
accessing search results while avoiding buffering the result set, which can be
very large.
-To use the reader, instantiate :class:`ResultsReader` on a search result stream
+To use the reader, instantiate :class:`JSONResultsReader` on a search result stream
as follows:::
reader = ResultsReader(result_stream)
@@ -32,26 +32,34 @@
print "Results are a preview: %s" % reader.is_preview
"""
+from __future__ import absolute_import
+
+from io import BufferedReader, BytesIO
+
+from splunklib import six
+
+from splunklib.six import deprecated
+
try:
import xml.etree.cElementTree as et
except:
import xml.etree.ElementTree as et
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from .ordereddict import OrderedDict
+from collections import OrderedDict
+from json import loads as json_loads
try:
- from cStringIO import StringIO
+ from splunklib.six.moves import cStringIO as StringIO
except:
- from StringIO import StringIO
+ from splunklib.six import StringIO
__all__ = [
"ResultsReader",
- "Message"
+ "Message",
+ "JSONResultsReader"
]
+
class Message(object):
"""This class represents informational messages that Splunk interleaves in the results stream.
@@ -62,6 +70,7 @@ class Message(object):
m = Message("DEBUG", "There's something in that variable...")
"""
+
def __init__(self, type_, message):
self.type = type_
self.message = message
@@ -75,6 +84,7 @@ def __eq__(self, other):
def __hash__(self):
return hash((self.type, self.message))
+
class _ConcatenatedStream(object):
"""Lazily concatenate zero or more streams into a stream.
@@ -87,6 +97,7 @@ class _ConcatenatedStream(object):
s = _ConcatenatedStream(StringIO("abc"), StringIO("def"))
assert s.read() == "abcdef"
"""
+
def __init__(self, *streams):
self.streams = list(streams)
@@ -95,16 +106,17 @@ def read(self, n=None):
If *n* is ``None``, return all available characters.
"""
- response = ""
+ response = b""
while len(self.streams) > 0 and (n is None or n > 0):
txt = self.streams[0].read(n)
response += txt
if n is not None:
n -= len(txt)
- if n > 0 or n is None:
+ if n is None or n > 0:
del self.streams[0]
return response
+
class _XMLDTDFilter(object):
"""Lazily remove all XML DTDs from a stream.
@@ -118,6 +130,7 @@ class _XMLDTDFilter(object):
s = _XMLDTDFilter("<?xml abcd><element><?xml ...></element>")
assert s.read() == "<element></element>"
"""
+
def __init__(self, stream):
self.stream = stream
@@ -126,17 +139,17 @@ def read(self, n=None):
If *n* is ``None``, return all available characters.
"""
- response = ""
+ response = b""
while n is None or n > 0:
c = self.stream.read(1)
- if c == "":
+ if c == b"":
break
- elif c == "<":
+ elif c == b"<":
c += self.stream.read(1)
- if c == "":
+ if c == b"":
while True:
q = self.stream.read(1)
- if q == ">":
+ if q == b">":
break
else:
response += c
@@ -148,6 +161,8 @@ def read(self, n=None):
n -= 1
return response
+
+@deprecated("Use the JSONResultsReader function instead in conjuction with the 'output_mode' query param set to 'json'")
class ResultsReader(object):
"""This class returns dictionaries and Splunk messages from an XML results
stream.
@@ -175,6 +190,7 @@ class ResultsReader(object):
print "Message: %s" % result
print "is_preview = %s " % reader.is_preview
"""
+
# Be sure to update the docstrings of client.Jobs.oneshot,
# client.Job.results_preview and client.Job.results to match any
# changes made to ResultsReader.
@@ -194,7 +210,7 @@ def __init__(self, stream):
# we remove all the DTD definitions inline, then wrap the
# fragments in a fiction element to make the parser happy.
stream = _XMLDTDFilter(stream)
- stream = _ConcatenatedStream(StringIO("<doc>"), stream, StringIO("</doc>"))
+ stream = _ConcatenatedStream(BytesIO(b"<doc>"), stream, BytesIO(b"</doc>"))
self.is_preview = None
self._gen = self._parse_results(stream)
@@ -202,7 +218,9 @@ def __iter__(self):
return self
def next(self):
- return self._gen.next()
+ return next(self._gen)
+
+ __next__ = next
def _parse_results(self, stream):
"""Parse results and messages out of *stream*."""
@@ -233,7 +251,7 @@ def _parse_results(self, stream):
if event == 'start':
values = []
elif event == 'end':
- field_name = elem.attrib['k'].encode('utf8')
+ field_name = elem.attrib['k']
if len(values) == 1:
result[field_name] = values[0]
else:
@@ -253,19 +271,19 @@ def _parse_results(self, stream):
# So we'll define it here
def __itertext(self):
- tag = self.tag
- if not isinstance(tag, basestring) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in __itertext(e):
- yield s
- if e.tail:
- yield e.tail
+ tag = self.tag
+ if not isinstance(tag, six.string_types) and tag is not None:
+ return
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in __itertext(e):
+ yield s
+ if e.tail:
+ yield e.tail
text = "".join(__itertext(elem))
- values.append(text.encode('utf8'))
+ values.append(text)
elem.clear()
elif elem.tag == 'msg':
@@ -273,7 +291,7 @@ def __itertext(self):
msg_type = elem.attrib['type']
elif event == 'end':
text = elem.text if elem.text is not None else ""
- yield Message(msg_type, text.encode('utf8'))
+ yield Message(msg_type, text)
elem.clear()
except SyntaxError as pe:
# This is here to handle the same incorrect return from
@@ -284,5 +302,72 @@ def __itertext(self):
raise
+class JSONResultsReader(object):
+ """This class returns dictionaries and Splunk messages from a JSON results
+ stream.
+ ``JSONResultsReader`` is iterable, and returns a ``dict`` for results, or a
+ :class:`Message` object for Splunk messages. This class has one field,
+ ``is_preview``, which is ``True`` when the results are a preview from a
+ running search, or ``False`` when the results are from a completed search.
+
+ This function has no network activity other than what is implicit in the
+ stream it operates on.
+ :param `stream`: The stream to read from (any object that supports ``.read()``).
+ **Example**::
+
+ import results
+ response = ... # the body of an HTTP response
+ reader = results.JSONResultsReader(response)
+ for result in reader:
+ if isinstance(result, dict):
+ print "Result: %s" % result
+ elif isinstance(result, results.Message):
+ print "Message: %s" % result
+ print "is_preview = %s " % reader.is_preview
+ """
+
+ # Be sure to update the docstrings of client.Jobs.oneshot,
+ # client.Job.results_preview and client.Job.results to match any
+ # changes made to JSONResultsReader.
+ #
+ # This wouldn't be a class, just the _parse_results function below,
+ # except that you cannot get the current generator inside the
+ # function creating that generator. Thus it's all wrapped up for
+ # the sake of one field.
+ def __init__(self, stream):
+ # The search/jobs/exports endpoint, when run with
+ # earliest_time=rt and latest_time=rt, output_mode=json, streams a sequence of
+ # JSON documents, each containing a result, as opposed to one
+ # results element containing lots of results.
+ stream = BufferedReader(stream)
+ self.is_preview = None
+ self._gen = self._parse_results(stream)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return next(self._gen)
+
+ __next__ = next
+
+ def _parse_results(self, stream):
+ """Parse results and messages out of *stream*."""
+ for line in stream.readlines():
+ strip_line = line.strip()
+ if strip_line.__len__() == 0: continue
+ parsed_line = json_loads(strip_line)
+ if "preview" in parsed_line:
+ self.is_preview = parsed_line["preview"]
+ if "messages" in parsed_line and parsed_line["messages"].__len__() > 0:
+ for message in parsed_line["messages"]:
+ msg_type = message.get("type", "Unknown Message Type")
+ text = message.get("text")
+ yield Message(msg_type, text)
+ if "result" in parsed_line:
+ yield parsed_line["result"]
+ if "results" in parsed_line:
+ for result in parsed_line["results"]:
+ yield result
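Putting the new JSONResultsReader together with the updated Job docstrings above, a usage sketch (the connection details and search string are assumptions):

    import splunklib.client as client
    import splunklib.results as results

    service = client.connect(host="localhost", port=8089,
                             username="admin", password="changeme")

    stream = service.jobs.oneshot("search index=_internal | head 5", output_mode="json")
    for result in results.JSONResultsReader(stream):
        if isinstance(result, results.Message):
            print("Message: %s" % result)   # diagnostic messages interleaved in the stream
        elif isinstance(result, dict):
            print("Result: %s" % result)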
diff --git a/bin/ta_dmarc/splunklib/searchcommands/__init__.py b/bin/ta_dmarc/splunklib/searchcommands/__init__.py
index 12b14f3..8a92903 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/__init__.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/__init__.py
@@ -30,7 +30,7 @@
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
It does not show that :code:`field-name` values may be comma-separated. This is because Splunk strips commas from
- the command line. A search command will never see them.
+ the command line. A search command will never see them.
2. Search commands targeting versions of Splunk prior to 6.3 must be statically configured as follows:
@@ -134,9 +134,13 @@
.. topic:: References
- 1. `Search command style guide `_
+ 1. `Custom Search Command manual: `__
- 2. `Commands.conf.spec `_
+ 2. `Create Custom Search Commands with commands.conf.spec `_
+
+ 3. `Configure search assistant with searchbnf.conf `_
+
+ 4. `Control search distribution with distsearch.conf `_
"""
diff --git a/bin/ta_dmarc/splunklib/searchcommands/decorators.py b/bin/ta_dmarc/splunklib/searchcommands/decorators.py
index 1a0400f..d8b3f48 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/decorators.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/decorators.py
@@ -15,14 +15,12 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+from splunklib import six
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+from collections import OrderedDict # must be python 2.7
from inspect import getmembers, isclass, isfunction
-from itertools import imap
+from splunklib.six.moves import map as imap
from .internals import ConfigurationSettingsType, json_encode_string
from .validators import OptionName
@@ -35,7 +33,7 @@ class Configuration(object):
variable to search command classes that don't have one. The :code:`name` is derived from the name of the class.
By convention command class names end with the word "Command". To derive :code:`name` the word "Command" is removed
from the end of the class name and then converted to lower case for conformance with the `Search command style guide
- `_
+ `__
"""
def __init__(self, o=None, **kwargs):
@@ -69,15 +67,15 @@ def __call__(self, o):
# Set command name
name = o.__name__
- if name.endswith(b'Command'):
- name = name[:-len(b'Command')]
- o.name = unicode(name.lower())
+ if name.endswith('Command'):
+ name = name[:-len('Command')]
+ o.name = six.text_type(name.lower())
# Construct ConfigurationSettings instance for the command class
o.ConfigurationSettings = ConfigurationSettingsType(
- module=o.__module__ + b'.' + o.__name__,
- name=b'ConfigurationSettings',
+ module=o.__module__ + '.' + o.__name__,
+ name='ConfigurationSettings',
bases=(o.ConfigurationSettings,))
ConfigurationSetting.fix_up(o.ConfigurationSettings, self.settings)
@@ -138,7 +136,7 @@ def fix_up(cls, values):
for name, setting in definitions:
if setting._name is None:
- setting._name = name = unicode(name)
+ setting._name = name = six.text_type(name)
else:
name = setting._name
@@ -195,8 +193,8 @@ def is_supported_by_protocol(version):
del values[name]
if len(values) > 0:
- settings = sorted(list(values.iteritems()))
- settings = imap(lambda (n, v): '{}={}'.format(n, repr(v)), settings)
+ settings = sorted(list(six.iteritems(values)))
+ settings = imap(lambda n_v: '{}={}'.format(n_v[0], repr(n_v[1])), settings)
raise AttributeError('Inapplicable configuration settings: ' + ', '.join(settings))
cls.configuration_setting_definitions = definitions
@@ -228,8 +226,9 @@ class Option(property):
Short form (recommended). When you are satisfied with built-in or custom validation behaviors.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands.decorators import Option
from splunklib.searchcommands.validators import Fieldname
@@ -246,8 +245,9 @@ class Option(property):
also provide a deleter. You must be prepared to accept a value of :const:`None` which indicates that your
:code:`Option` is unset.
- .. code-block:: python
+ .. code-block:: python
:linenos:
+
from splunklib.searchcommands import Option
@Option()
@@ -357,7 +357,7 @@ def __init__(self, command, option):
self._option = option
self._is_set = False
validator = self.validator
- self._format = unicode if validator is None else validator.format
+ self._format = six.text_type if validator is None else validator.format
def __repr__(self):
return '(' + repr(self.name) + ', ' + repr(self._format(self.value)) + ')'
@@ -417,24 +417,24 @@ class View(OrderedDict):
def __init__(self, command):
definitions = type(command).option_definitions
item_class = Option.Item
- OrderedDict.__init__(self, imap(lambda (name, option): (option.name, item_class(command, option)), definitions))
+ OrderedDict.__init__(self, ((option.name, item_class(command, option)) for (name, option) in definitions))
def __repr__(self):
- text = 'Option.View([' + ','.join(imap(lambda item: repr(item), self.itervalues())) + '])'
+ text = 'Option.View([' + ','.join(imap(lambda item: repr(item), six.itervalues(self))) + '])'
return text
def __str__(self):
- text = ' '.join([str(item) for item in self.itervalues() if item.is_set])
+ text = ' '.join([str(item) for item in six.itervalues(self) if item.is_set])
return text
# region Methods
def get_missing(self):
- missing = [item.name for item in self.itervalues() if item.is_required and not item.is_set]
+ missing = [item.name for item in six.itervalues(self) if item.is_required and not item.is_set]
return missing if len(missing) > 0 else None
def reset(self):
- for value in self.itervalues():
+ for value in six.itervalues(self):
value.reset()
pass
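To illustrate the Option/Configuration changes above (native str names instead of bytes), a minimal command sketch; the class and option names are made up:

    from splunklib.searchcommands import Configuration, Option, StreamingCommand
    from splunklib.searchcommands.validators import Fieldname

    @Configuration()
    class CountMatchesCommand(StreamingCommand):
        # "Command" is stripped and the remainder lower-cased, so the derived
        # command name is 'countmatches'; the suffix check now compares str, not bytes
        fieldname = Option(require=True, validate=Fieldname())

        def stream(self, records):
            for record in records:
                yield record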
diff --git a/bin/ta_dmarc/splunklib/searchcommands/environment.py b/bin/ta_dmarc/splunklib/searchcommands/environment.py
index 785042c..e92018f 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/environment.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/environment.py
@@ -18,7 +18,8 @@
from logging import getLogger, root, StreamHandler
from logging.config import fileConfig
-from os import chdir, environ, getcwdu, path
+from os import chdir, environ, path
+from splunklib.six.moves import getcwd
import sys
@@ -96,7 +97,7 @@ def configure_logging(logger_name, filename=None):
filename = path.realpath(filename)
if filename != _current_logging_configuration_file:
- working_directory = getcwdu()
+ working_directory = getcwd()
chdir(app_root)
try:
fileConfig(filename, {'SPLUNK_HOME': splunk_home})
@@ -112,7 +113,7 @@ def configure_logging(logger_name, filename=None):
_current_logging_configuration_file = None
-splunk_home = path.abspath(path.join(getcwdu(), environ.get('SPLUNK_HOME', '')))
+splunk_home = path.abspath(path.join(getcwd(), environ.get('SPLUNK_HOME', '')))
app_file = getattr(sys.modules['__main__'], '__file__', sys.executable)
app_root = path.dirname(path.abspath(path.dirname(app_file)))
diff --git a/bin/ta_dmarc/splunklib/searchcommands/eventing_command.py b/bin/ta_dmarc/splunklib/searchcommands/eventing_command.py
index fde7aad..27dc13a 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/eventing_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/eventing_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from itertools import imap
+from splunklib import six
+from splunklib.six.moves import map as imap
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -113,10 +114,10 @@ class ConfigurationSettings(SearchCommand.ConfigurationSettings):
''')
- type = ConfigurationSetting(readonly=True, value='eventing', doc='''
+ type = ConfigurationSetting(readonly=True, value='events', doc='''
Command type
- Fixed: :const:`'eventing'`.
+ Fixed: :const:`'events'`.
Supported by: SCP 2
@@ -135,8 +136,14 @@ def fix_up(cls, command):
raise AttributeError('No EventingCommand.transform override')
SearchCommand.ConfigurationSettings.fix_up(command)
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
- return imap(lambda (name, value): (name, 'events' if name == 'type' else value), iteritems)
+ return imap(lambda name_value: (name_value[0], 'events' if name_value[0] == 'type' else name_value[1]), iteritems)
+
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
# endregion
diff --git a/bin/ta_dmarc/splunklib/searchcommands/external_search_command.py b/bin/ta_dmarc/splunklib/searchcommands/external_search_command.py
index c71d11b..c230624 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/external_search_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/external_search_command.py
@@ -20,6 +20,7 @@
import os
import sys
import traceback
+from splunklib import six
if sys.platform == 'win32':
from signal import signal, CTRL_BREAK_EVENT, SIGBREAK, SIGINT, SIGTERM
@@ -36,11 +37,11 @@ class ExternalSearchCommand(object):
"""
def __init__(self, path, argv=None, environ=None):
- if not isinstance(path, (bytes, unicode)):
+ if not isinstance(path, (bytes, six.text_type)):
raise ValueError('Expected a string value for path, not {}'.format(repr(path)))
self._logger = getLogger(self.__class__.__name__)
- self._path = unicode(path)
+ self._path = six.text_type(path)
self._argv = None
self._environ = None
@@ -89,7 +90,7 @@ def execute(self):
self._execute(self._path, self._argv, self._environ)
except:
error_type, error, tb = sys.exc_info()
- message = 'Command execution failed: ' + unicode(error)
+ message = 'Command execution failed: ' + six.text_type(error)
self._logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb)))
sys.exit(1)
@@ -104,13 +105,13 @@ def _execute(path, argv=None, environ=None):
:param argv: Argument list.
:type argv: list or tuple
- The arguments to the child process should start with the name of the command being run, but this is not
- enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
+ The arguments to the child process should start with the name of the command being run, but this is not
+ enforced. A value of :const:`None` specifies that the base name of path name :param:`path` should be used.
:param environ: A mapping which is used to define the environment variables for the new process.
:type environ: dict or None.
- This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
- the :data:`os.environ` mapping should be used.
+ This mapping is used instead of the current process’s environment. A value of :const:`None` specifies that
+ the :data:`os.environ` mapping should be used.
:return: None
@@ -142,7 +143,9 @@ def terminate_child():
p.wait()
logger.debug('finished command="%s", arguments=%s, pid=%d, returncode=%d', path, argv, p.pid, p.returncode)
- sys.exit(p.returncode)
+
+ if p.returncode != 0:
+ sys.exit(p.returncode)
@staticmethod
def _search_path(executable, paths):
diff --git a/bin/ta_dmarc/splunklib/searchcommands/generating_command.py b/bin/ta_dmarc/splunklib/searchcommands/generating_command.py
index 3bd0192..6a75d2c 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/generating_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/generating_command.py
@@ -15,11 +15,13 @@
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
+import sys
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
-from itertools import imap, ifilter
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
# P1 [O] TODO: Discuss generates_timeorder in the class-level documentation for GeneratingCommand
@@ -56,7 +58,7 @@ class GeneratingCommand(SearchCommand):
+==========+=====================================+============================================+
| streams | streaming=True[,local=[True|False]] | type='streaming'[,distributed=[true|false] |
+----------+-------------------------------------+--------------------------------------------+
- | events | retainsevents=True, streaming=False | type='eventing' |
+ | events | retainsevents=True, streaming=False | type='events' |
+----------+-------------------------------------+--------------------------------------------+
| reports | streaming=False | type='reporting' |
+----------+-------------------------------------+--------------------------------------------+
@@ -92,9 +94,10 @@ class StreamingGeneratingCommand(GeneratingCommand)
+==========+===================================================+===================================================+
| streams | 1. Add this line to your command's stanza in | 1. Add this configuration setting to your code: |
| | | |
- | | default/commands.conf. | .. code-block:: python |
- | | .. code-block:: python | @Configuration(distributed=True) |
- | | local = false | class SomeCommand(GeneratingCommand) |
+ | | default/commands.conf:: | .. code-block:: python |
+ | | | |
+ | | local = false | @Configuration(distributed=True) |
+ | | | class SomeCommand(GeneratingCommand) |
| | | ... |
| | 2. Restart splunk | |
| | | 2. You are good to go; no need to restart Splunk |
@@ -112,29 +115,33 @@ class StreamingGeneratingCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
- | | @Configuration( | @Configuration(type='eventing') |
+ | | | |
+ | | @Configuration( | @Configuration(type='events') |
| | retainsevents=True, streaming=False) | class SomeCommand(GeneratingCommand) |
| | class SomeCommand(GeneratingCommand) | ... |
| | ... | |
| | | |
| | Or add these lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = true | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = true | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
- @Configuration(type='eventing', retainsevents=True, streaming=False)
+ .. code-block:: python
+
+ @Configuration(type='events', retainsevents=True, streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: python
+
+ retainsevents = false
streaming = false
Reporting Generating command
@@ -149,28 +156,32 @@ class SomeCommand(GeneratingCommand)
| | settings to your command class: | setting to your command class: |
| | | |
| | .. code-block:: python | .. code-block:: python |
+ | | | |
| | @Configuration(retainsevents=False) | @Configuration(type='reporting') |
| | class SomeCommand(GeneratingCommand) | class SomeCommand(GeneratingCommand) |
| | ... | ... |
| | | |
| | Or add this lines to default/commands.conf: | |
| | | |
- | | .. code-block:: | |
- | | retains events = false | |
+ | | .. code-block:: text | |
+ | | | |
+ | | retainsevents = false | |
| | streaming = false | |
+----------+---------------------------------------------------+---------------------------------------------------+
Configure your command class like this, if you wish to support both protocols:
- .. code-block:: python
+ .. code-block:: python
+
@Configuration(type='reporting', streaming=False)
class SomeCommand(GeneratingCommand)
...
You might also consider adding these lines to commands.conf instead of adding them to your command class:
- .. code-block:: python
- retains events = false
+ .. code-block:: text
+
+ retainsevents = false
streaming = false
"""
@@ -193,9 +204,59 @@ def _execute(self, ifile, process):
:return: `None`.
"""
- self._record_writer.write_records(self.generate())
+ if self._protocol_version == 2:
+ self._execute_v2(ifile, self.generate())
+ else:
+ assert self._protocol_version == 1
+ self._record_writer.write_records(self.generate())
self.finish()
+ def _execute_chunk_v2(self, process, chunk):
+ count = 0
+ records = []
+ for row in process:
+ records.append(row)
+ count += 1
+ if count == self._record_writer._maxresultrows:
+ break
+
+ for row in records:
+ self._record_writer.write_record(row)
+
+ if count == self._record_writer._maxresultrows:
+ self._finished = False
+ else:
+ self._finished = True
+
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
+ """ Process data.
+
+ :param argv: Command line arguments.
+ :type argv: list or tuple
+
+ :param ifile: Input data file.
+ :type ifile: file
+
+ :param ofile: Output data file.
+ :type ofile: file
+
+ :param allow_empty_input: For generating commands, it must be true. Doing otherwise will cause an error.
+ :type allow_empty_input: bool
+
+ :return: :const:`None`
+ :rtype: NoneType
+
+ """
+
+ # Generating commands are expected to run on an empty set of inputs as the first command being run in a search,
+ # also this class implements its own separate _execute_chunk_v2 method which does not respect allow_empty_input
+ # so ensure that allow_empty_input is always True
+
+ if not allow_empty_input:
+ raise ValueError("allow_empty_input cannot be False for Generating Commands")
+ else:
+ return super(GeneratingCommand, self).process(argv=argv, ifile=ifile, ofile=ofile, allow_empty_input=True)
+
# endregion
# region Types
@@ -280,7 +341,7 @@ class ConfigurationSettings(SearchCommand.ConfigurationSettings):
==================== ======================================================================================
Value Description
-------------------- --------------------------------------------------------------------------------------
- :const:`'eventing'` Runs as the first command in the Splunk events pipeline. Cannot be distributed.
+ :const:`'events'` Runs as the first command in the Splunk events pipeline. Cannot be distributed.
:const:`'reporting'` Runs as the first command in the Splunk reports pipeline. Cannot be distributed.
:const:`'streaming'` Runs as the first command in the Splunk streams pipeline. May be distributed.
==================== ======================================================================================
@@ -303,16 +364,22 @@ def fix_up(cls, command):
if command.generate == GeneratingCommand.generate:
raise AttributeError('No GeneratingCommand.generate override')
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
if version == 2:
- iteritems = ifilter(lambda (name, value): name != 'distributed', iteritems)
- if self.distributed and self.type == 'streaming':
+ iteritems = ifilter(lambda name_value1: name_value1[0] != 'distributed', iteritems)
+ if not self.distributed and self.type == 'streaming':
iteritems = imap(
- lambda (name, value): (name, 'stateful') if name == 'type' else (name, value), iteritems)
+ lambda name_value: (name_value[0], 'stateful') if name_value[0] == 'type' else (name_value[0], name_value[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass
# endregion
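A bare-bones generating command against this version of the class (event fields are illustrative): under protocol v2 the new _execute_v2/_execute_chunk_v2 path is used, and process() now insists on allow_empty_input=True.

    import sys
    import time
    from splunklib.searchcommands import Configuration, GeneratingCommand, dispatch

    @Configuration(type='events', retainsevents=True, streaming=False)
    class HelloCommand(GeneratingCommand):
        def generate(self):
            for i in range(3):
                yield {'_time': time.time(), 'event_no': i, '_raw': 'hello %d' % i}

    if __name__ == '__main__':
        dispatch(HelloCommand, sys.argv, sys.stdin, sys.stdout, __name__)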
diff --git a/bin/ta_dmarc/splunklib/searchcommands/internals.py b/bin/ta_dmarc/splunklib/searchcommands/internals.py
index be57703..1ea2833 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/internals.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/internals.py
@@ -14,45 +14,65 @@
# License for the specific language governing permissions and limitations
# under the License.
-from __future__ import absolute_import, division, print_function, unicode_literals
+from __future__ import absolute_import, division, print_function
+from io import TextIOWrapper
from collections import deque, namedtuple
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
-from cStringIO import StringIO
-from itertools import chain, imap
+from splunklib import six
+from collections import OrderedDict
+from splunklib.six.moves import StringIO
+from itertools import chain
+from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
-from urllib import unquote
+from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
+import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
-if sys.platform == 'win32':
- # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
- # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
- # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
- from platform import python_implementation
- implementation = python_implementation()
- fileno = sys.stdout.fileno()
- if implementation == 'PyPy':
- sys.stdout = os.fdopen(fileno, 'wb', 0)
- else:
- from msvcrt import setmode
- setmode(fileno, os.O_BINARY)
+
+def set_binary_mode(fh):
+ """ Helper method to set up binary mode for file handles.
+ Emphasis being sys.stdin, sys.stdout, sys.stderr.
+ For python3, we want to return .buffer
+ For python2+windows we want to set os.O_BINARY
+ """
+ typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
+ # check for file handle
+ if not isinstance(fh, typefile):
+ return fh
+
+ # check for python3 and buffer
+ if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
+ return fh.buffer
+ # check for python3
+ elif sys.version_info >= (3, 0):
+ pass
+ # check for windows python2. SPL-175233 -- python3 stdout is already binary
+ elif sys.platform == 'win32':
+ # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
+ # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
+ # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
+ from platform import python_implementation
+ implementation = python_implementation()
+ if implementation == 'PyPy':
+ return os.fdopen(fh.fileno(), 'wb', 0)
+ else:
+ import msvcrt
+ msvcrt.setmode(fh.fileno(), os.O_BINARY)
+ return fh
class CommandLineParser(object):
- """ Parses the arguments to a search command.
+ r""" Parses the arguments to a search command.
A search command line is described by the following syntax.
@@ -210,7 +230,7 @@ def replace(match):
_escaped_character_re = re.compile(r'(\\.|""|[\\"])')
- _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
+ _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"\\])+"|(?:\\.|[^\s"])+)""")
_options_re = re.compile(r"""
# Captures a set of name/value pairs when used with re.finditer
@@ -243,7 +263,7 @@ class ConfigurationSettingsType(type):
"""
def __new__(mcs, module, name, bases):
- mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, name, bases, {})
+ mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
return mcs
def __init__(cls, module, name, bases):
@@ -264,10 +284,10 @@ def validate_configuration_setting(specification, name, value):
return value
specification = namedtuple(
- b'ConfigurationSettingSpecification', (
- b'type',
- b'constraint',
- b'supporting_protocols'))
+ 'ConfigurationSettingSpecification', (
+ 'type',
+ 'constraint',
+ 'supporting_protocols'))
# P1 [ ] TODO: Review ConfigurationSettingsType.specification_matrix for completeness and correctness
@@ -294,7 +314,7 @@ def validate_configuration_setting(specification, name, value):
supporting_protocols=[1]),
'maxinputs': specification(
type=int,
- constraint=lambda value: 0 <= value <= sys.maxint,
+ constraint=lambda value: 0 <= value <= six.MAXSIZE,
supporting_protocols=[2]),
'overrides_timeorder': specification(
type=bool,
@@ -321,22 +341,24 @@ def validate_configuration_setting(specification, name, value):
constraint=None,
supporting_protocols=[1]),
'streaming_preop': specification(
- type=(bytes, unicode),
+ type=(bytes, six.text_type),
constraint=None,
supporting_protocols=[1, 2]),
'type': specification(
- type=(bytes, unicode),
- constraint=lambda value: value in ('eventing', 'reporting', 'streaming'),
+ type=(bytes, six.text_type),
+ constraint=lambda value: value in ('events', 'reporting', 'streaming'),
supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
""" Describes the properties of Splunk CSV streams """
- delimiter = b','
- quotechar = b'"'
+ delimiter = ','
+ quotechar = '"'
doublequote = True
skipinitialspace = False
- lineterminator = b'\r\n'
+ lineterminator = '\r\n'
+ if sys.version_info >= (3, 0) and sys.platform == 'win32':
+ lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
@@ -344,8 +366,9 @@ class InputHeader(dict):
""" Represents a Splunk input header as a collection of name/value pairs.
"""
+
def __str__(self):
- return '\n'.join([name + ':' + value for name, value in self.iteritems()])
+ return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])
def read(self, ifile):
""" Reads an input header from an input file.
@@ -366,15 +389,16 @@ def read(self, ifile):
# start of a new item
if name is not None:
self[name] = value[:-1] # value sans trailing newline
- name, value = item[0], unquote(item[1])
+ name, value = item[0], urllib.parse.unquote(item[1])
elif name is not None:
# continuation of the current item
- value += unquote(line)
+ value += urllib.parse.unquote(line)
- if name is not None: self[name] = value[:-1] if value[-1] == '\n' else value
+ if name is not None:
+ self[name] = value[:-1] if value[-1] == '\n' else value
-Message = namedtuple(b'Message', (b'type', b'text'))
+Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
@@ -392,7 +416,7 @@ def _object_hook(dictionary):
while len(stack):
instance, member_name, dictionary = stack.popleft()
- for name, value in dictionary.iteritems():
+ for name, value in six.iteritems(dictionary):
if isinstance(value, dict):
stack.append((dictionary, name, value))
@@ -468,7 +492,7 @@ class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
- self._ofile = ofile
+ self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
@@ -479,8 +503,9 @@ def __init__(self, ofile, maxresultrows=None):
self._inspector = OrderedDict()
self._chunk_count = 0
- self._record_count = 0
- self._total_record_count = 0L
+ self._pending_record_count = 0
+ self._committed_record_count = 0
+ self.custom_fields = set()
@property
def is_flushed(self):
@@ -496,7 +521,37 @@ def ofile(self):
@ofile.setter
def ofile(self, value):
- self._ofile = value
+ self._ofile = set_binary_mode(value)
+
+ @property
+ def pending_record_count(self):
+ return self._pending_record_count
+
+ @property
+ def _record_count(self):
+ warnings.warn(
+ "_record_count will be deprecated soon. Use pending_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.pending_record_count
+
+ @property
+ def committed_record_count(self):
+ return self._committed_record_count
+
+ @property
+ def _total_record_count(self):
+ warnings.warn(
+ "_total_record_count will be deprecated soon. Use committed_record_count instead.",
+ PendingDeprecationWarning
+ )
+ return self.committed_record_count
+
+ def write(self, data):
+ bytes_type = bytes if sys.version_info >= (3, 0) else str
+ if not isinstance(data, bytes_type):
+ data = data.encode('utf-8')
+ self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
@@ -515,16 +570,16 @@ def write_record(self, record):
def write_records(self, records):
self._ensure_validity()
+ records = list(records)
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
- self._buffer.reset()
+ self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
- self._record_count = 0
- self._flushed = False
+ self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
@@ -536,9 +591,9 @@ def _write_record(self, record):
fieldnames = self._fieldnames
if fieldnames is None:
- self._fieldnames = fieldnames = record.keys()
- value_list = imap(lambda fn: unicode(fn).encode('utf-8'), fieldnames)
- value_list = imap(lambda fn: (fn, b'__mv_' + fn), value_list)
+ self._fieldnames = fieldnames = list(record.keys())
+ self._fieldnames.extend([i for i in self.custom_fields if i not in self._fieldnames])
+ value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
@@ -561,14 +616,14 @@ def _write_record(self, record):
if len(value) > 1:
value_list = value
- sv = b''
- mv = b'$'
+ sv = ''
+ mv = '$'
for value in value_list:
if value is None:
- sv += b'\n'
- mv += b'$;$'
+ sv += '\n'
+ mv += '$;$'
continue
value_t = type(value)
@@ -577,17 +632,17 @@ def _write_record(self, record):
if value_t is bool:
value = str(value.real)
- elif value_t is unicode:
- value = value.encode('utf-8', errors='backslashreplace')
- elif value_t is int or value_t is long or value_t is float or value_t is complex:
+ elif value_t is six.text_type:
+ value = value
+ elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
- sv += value + b'\n'
- mv += value.replace(b'$', b'$$') + b'$;$'
+ sv += value + '\n'
+ mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
@@ -603,11 +658,13 @@ def _write_record(self, record):
values += (value, None)
continue
- if value_t is unicode:
- values += (value.encode('utf-8', errors='backslashreplace'), None)
+ if value_t is six.text_type:
+ if six.PY2:
+ value = value.encode('utf-8')
+ values += (value, None)
continue
- if value_t is int or value_t is long or value_t is float or value_t is complex:
+ if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
@@ -615,12 +672,12 @@ def _write_record(self, record):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
- values += (repr(value).encode('utf-8', errors='backslashreplace'), None)
+ values += (repr(value), None)
self._writerow(values)
- self._record_count += 1
+ self._pending_record_count += 1
- if self._record_count >= self._maxresultrows:
+ if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
@@ -657,10 +714,9 @@ def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- if self._record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
+ if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
- write = self._ofile.write
if self._chunk_count == 0:
@@ -672,12 +728,12 @@ def flush(self, finished=None, partial=None):
message_level = RecordWriterV1._message_level.get
for level, text in messages:
- write(message_level(level, level))
- write('=')
- write(text)
- write('\r\n')
+ self.write(message_level(level, level))
+ self.write('=')
+ self.write(text)
+ self.write('\r\n')
- write('\r\n')
+ self.write('\r\n')
elif messages is not None:
@@ -695,10 +751,10 @@ def flush(self, finished=None, partial=None):
for level, text in messages:
print(level, text, file=stderr)
- write(self._buffer.getvalue())
- self._clear()
+ self.write(self._buffer.getvalue())
self._chunk_count += 1
- self._total_record_count += self._record_count
+ self._committed_record_count += self.pending_record_count
+ self._clear()
self._finished = finished is True
@@ -716,44 +772,43 @@ class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
- inspector = self._inspector
-
- if self._flushed is False:
-
- self._total_record_count += self._record_count
- self._chunk_count += 1
-
- # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
- # ChunkedExternProcessor (See SPL-103525)
- #
- # We will need to replace the following block of code with this block:
- #
- # metadata = [
- # ('inspector', self._inspector if len(self._inspector) else None),
- # ('finished', finished),
- # ('partial', partial)]
- if len(inspector) == 0:
- inspector = None
-
- if partial is True:
- finished = False
-
- metadata = [item for item in ('inspector', inspector), ('finished', finished)]
- self._write_chunk(metadata, self._buffer.getvalue())
- self._clear()
+ if partial or not finished:
+ # Don't flush partial chunks, since the SCP v2 protocol does not
+ # provide a way to send partial chunks yet.
+ return
- elif finished is True:
- self._write_chunk((('finished', True),), '')
+ if not self.is_flushed:
+ self.write_chunk(finished=True)
- self._finished = finished is True
+ def write_chunk(self, finished=None):
+ inspector = self._inspector
+ self._committed_record_count += self.pending_record_count
+ self._chunk_count += 1
+
+ # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
+ # ChunkedExternProcessor (See SPL-103525)
+ #
+ # We will need to replace the following block of code with this block:
+ #
+ # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
+ #
+ # if partial is True:
+ # finished = False
+
+ if len(inspector) == 0:
+ inspector = None
+
+ metadata = [item for item in (('inspector', inspector), ('finished', finished))]
+ self._write_chunk(metadata, self._buffer.getvalue())
+ self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
- metadata = chain(configuration.iteritems(), (('inspector', self._inspector if self._inspector else None),))
+ metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
- self._ofile.write('\n')
+ self.write('\n')
self._clear()
def write_metric(self, name, value):
@@ -761,26 +816,29 @@ def write_metric(self, name, value):
self._inspector['metric.' + name] = value
def _clear(self):
- RecordWriter._clear(self)
+ super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
+ if sys.version_info >= (3, 0):
+ metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
+ if sys.version_info >= (3, 0):
+ body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
- start_line = b'chunked 1.0,' + bytes(metadata_length) + b',' + bytes(body_length) + b'\n'
- write = self._ofile.write
- write(start_line)
- write(metadata)
- write(body)
+ start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
+ self.write(start_line)
+ self.write(metadata)
+ self.write(body)
self._ofile.flush()
- self._flushed = False
+ self._flushed = True
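For reference, the SCP v2 frame that RecordWriterV2._write_chunk now emits is a plain-text header followed by UTF-8 encoded JSON metadata and the CSV body. The helper below is a standalone sketch of that framing, for illustration only; it is not part of the library or of this add-on.

import json

def encode_chunk(metadata, body=''):
    """Illustrative only: frame one SCP v2 chunk the way _write_chunk does."""
    encoded_metadata = json.dumps(metadata).encode('utf-8') if metadata else b''
    encoded_body = body.encode('utf-8')
    # 'chunked 1.0,<metadata length>,<body length>\n' precedes every chunk
    header = 'chunked 1.0,%d,%d\n' % (len(encoded_metadata), len(encoded_body))
    return header.encode('utf-8') + encoded_metadata + encoded_body

# encode_chunk({'finished': True}) -> b'chunked 1.0,18,0\n{"finished": true}'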
diff --git a/bin/ta_dmarc/splunklib/searchcommands/reporting_command.py b/bin/ta_dmarc/splunklib/searchcommands/reporting_command.py
index c856ee1..9470861 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/reporting_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/reporting_command.py
@@ -23,6 +23,7 @@
from .streaming_command import StreamingCommand
from .search_command import SearchCommand
from .validators import Set
+from splunklib import six
class ReportingCommand(SearchCommand):
@@ -93,7 +94,7 @@ def prepare(self):
self._configuration.streaming_preop = ' '.join(streaming_preop)
return
- raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(unicode(phase))))
+ raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(six.text_type(phase))))
def reduce(self, records):
""" Override this method to produce a reporting data structure.
@@ -252,7 +253,7 @@ def fix_up(cls, command):
cls._requires_preop = False
return
- f = vars(command)[b'map'] # Function backing the map method
+ f = vars(command)['map'] # Function backing the map method
# EXPLANATION OF PREVIOUS STATEMENT: There is no way to add custom attributes to methods. See [Why does
# setattr fail on a method](http://stackoverflow.com/questions/7891277/why-does-setattr-fail-on-a-bound-method) for a discussion of this issue.
@@ -265,7 +266,7 @@ def fix_up(cls, command):
# Create new StreamingCommand.ConfigurationSettings class
- module = command.__module__ + b'.' + command.__name__ + b'.map'
+ module = command.__module__ + '.' + command.__name__ + '.map'
name = b'ConfigurationSettings'
bases = (StreamingCommand.ConfigurationSettings,)
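The fix_up changes above operate on the map method of ReportingCommand subclasses. As a hedged sketch of the shape they expect (the command, field, and option names below are illustrative and not part of this add-on):

import sys
from splunklib.searchcommands import dispatch, ReportingCommand, Configuration

@Configuration(requires_preop=True)
class SumBytesCommand(ReportingCommand):
    """Illustrative reporting command: sums a numeric field across events."""

    @Configuration()
    def map(self, records):
        # streaming pre-operation; fix_up builds a ConfigurationSettings class for it
        for record in records:
            yield {'bytes': record.get('bytes', 0)}

    def reduce(self, records):
        # reporting phase on the search head
        yield {'total_bytes': sum(float(record['bytes']) for record in records)}

dispatch(SumBytesCommand, sys.argv, sys.stdin, sys.stdout, __name__)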
diff --git a/bin/ta_dmarc/splunklib/searchcommands/search_command.py b/bin/ta_dmarc/splunklib/searchcommands/search_command.py
index 2f6cb08..dd11391 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/search_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/search_command.py
@@ -19,22 +19,27 @@
# Absolute imports
from collections import namedtuple
-try:
- from collections import OrderedDict # must be python 2.7
-except ImportError:
- from ..ordereddict import OrderedDict
+
+import io
+
+from collections import OrderedDict
from copy import deepcopy
-from cStringIO import StringIO
-from itertools import chain, ifilter, imap, islice, izip
-from logging import _levelNames, getLevelName, getLogger
+from splunklib.six.moves import StringIO
+from itertools import chain, islice
+from splunklib.six.moves import filter as ifilter, map as imap, zip as izip
+from splunklib import six
+if six.PY2:
+ from logging import _levelNames, getLevelName, getLogger
+else:
+ from logging import _nameToLevel as _levelNames, getLevelName, getLogger
try:
from shutil import make_archive
except ImportError:
# Used for recording, skip on python 2.6
pass
from time import time
-from urllib import unquote
-from urlparse import urlsplit
+from splunklib.six.moves.urllib.parse import unquote
+from splunklib.six.moves.urllib.parse import urlsplit
from warnings import warn
from xml.etree import ElementTree
@@ -47,7 +52,7 @@
# Relative imports
-from . internals import (
+from .internals import (
CommandLineParser,
CsvDialect,
InputHeader,
@@ -63,6 +68,7 @@
from . import Boolean, Option, environment
from ..client import Service
+
# ----------------------------------------------------------------------------------------------------------------------
# P1 [ ] TODO: Log these issues against ChunkedExternProcessor
@@ -89,6 +95,7 @@ class SearchCommand(object):
""" Represents a custom search command.
"""
+
def __init__(self):
# Variables that may be used, but not altered by derived classes
@@ -114,6 +121,7 @@ def __init__(self):
self._default_logging_level = self._logger.level
self._record_writer = None
self._records = None
+ self._allow_empty_input = True
def __str__(self):
text = ' '.join(chain((type(self).name, str(self.options)), [] if self.fieldnames is None else self.fieldnames))
@@ -150,7 +158,7 @@ def logging_level(self):
def logging_level(self, value):
if value is None:
value = self._default_logging_level
- if isinstance(value, (bytes, unicode)):
+ if isinstance(value, (bytes, six.text_type)):
try:
level = _levelNames[value.upper()]
except KeyError:
@@ -162,6 +170,14 @@ def logging_level(self, value):
raise ValueError('Unrecognized logging level: {}'.format(value))
self._logger.setLevel(level)
+ def add_field(self, current_record, field_name, field_value):
+ self._record_writer.custom_fields.add(field_name)
+ current_record[field_name] = field_value
+
+ def gen_record(self, **record):
+ self._record_writer.custom_fields |= set(record.keys())
+ return record
+
record = Option(doc='''
**Syntax: record=<bool>**
@@ -246,7 +262,7 @@ def search_results_info(self):
invocation.
:return: Search results info, or :const:`None` if the search results info file associated with the command
- invocation is inaccessible.
+ invocation is inaccessible.
:rtype: SearchResultsInfo or NoneType
"""
@@ -269,10 +285,10 @@ def search_results_info(self):
path = os.path.join(dispatch_dir, 'info.csv')
try:
- with open(path, 'rb') as f:
+ with io.open(path, 'r') as f:
reader = csv.reader(f, dialect=CsvDialect)
- fields = reader.next()
- values = reader.next()
+ fields = next(reader)
+ values = next(reader)
except IOError as error:
if error.errno == 2:
self.logger.error('Search results info file {} does not exist.'.format(json_encode_string(path)))
@@ -290,7 +306,7 @@ def convert_value(value):
except ValueError:
return value
- info = ObjectView(dict(imap(lambda (f, v): (convert_field(f), convert_value(v)), izip(fields, values))))
+ info = ObjectView(dict(imap(lambda f_v: (convert_field(f_v[0]), convert_value(f_v[1])), izip(fields, values))))
try:
count_map = info.countMap
@@ -307,7 +323,7 @@ def convert_value(value):
except AttributeError:
pass
else:
- messages = ifilter(lambda (t, m): t or m, izip(msg_type.split('\n'), msg_text.split('\n')))
+ messages = ifilter(lambda t_m: t_m[0] or t_m[1], izip(msg_type.split('\n'), msg_text.split('\n')))
info.msg = [Message(message) for message in messages]
del info.msgType
@@ -328,6 +344,7 @@ def service(self):
specifying this pair of configuration settings in commands.conf:
.. code-block:: python
+
enableheader = true
requires_srinfo = true
@@ -335,8 +352,8 @@ def service(self):
:code:`requires_srinfo` setting is false by default. Hence, you must set it.
:return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both
- :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
- of :code:`None` is returned.
+ :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value
+ of :code:`None` is returned.
"""
if self._service is not None:
@@ -387,7 +404,7 @@ def flush(self):
:return: :const:`None`
"""
- self._record_writer.flush(partial=True)
+ self._record_writer.flush(finished=False)
def prepare(self):
""" Prepare for execution.
@@ -402,7 +419,7 @@ def prepare(self):
"""
pass
- def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
+ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout, allow_empty_input=True):
""" Process data.
:param argv: Command line arguments.
@@ -414,10 +431,16 @@ def process(self, argv=sys.argv, ifile=sys.stdin, ofile=sys.stdout):
:param ofile: Output data file.
:type ofile: file
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
+
:return: :const:`None`
:rtype: NoneType
"""
+
+ self._allow_empty_input = allow_empty_input
+
if len(argv) > 1:
self._process_protocol_v1(argv, ifile, ofile)
else:
@@ -443,7 +466,7 @@ def _map_metadata(self, argv):
def _map(metadata_map):
metadata = {}
- for name, value in metadata_map.iteritems():
+ for name, value in six.iteritems(metadata_map):
if isinstance(value, dict):
value = _map(value)
else:
@@ -493,7 +516,7 @@ def _map(metadata_map):
'username':
(lambda v: v.ppc_user, lambda s: s.search_results_info)}}
- _MetadataSource = namedtuple(b'Source', (b'argv', b'input_header', b'search_results_info'))
+ _MetadataSource = namedtuple('Source', ('argv', 'input_header', 'search_results_info'))
def _prepare_protocol_v1(self, argv, ifile, ofile):
@@ -580,7 +603,7 @@ def _process_protocol_v1(self, argv, ifile, ofile):
ifile = self._prepare_protocol_v1(argv, ifile, ofile)
self._record_writer.write_record(dict(
- (n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in self._configuration.iteritems()))
+ (n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in six.iteritems(self._configuration)))
self.finish()
elif argv[1] == '__EXECUTE__':
@@ -608,7 +631,7 @@ def _process_protocol_v1(self, argv, ifile, ofile):
raise RuntimeError(message)
except (SyntaxError, ValueError) as error:
- self.write_error(unicode(error))
+ self.write_error(six.text_type(error))
self.flush()
exit(0)
@@ -623,6 +646,19 @@ def _process_protocol_v1(self, argv, ifile, ofile):
debug('%s.process finished under protocol_version=1', class_name)
+ def _protocol_v2_option_parser(self, arg):
+ """ Determines if an argument is an Option/Value pair, or just a Positional Argument.
+ Method so different search commands can handle parsing of arguments differently.
+
+ :param arg: A single argument provided to the command from SPL
+ :type arg: str
+
+ :return: [OptionName, OptionValue] OR [PositionalArgument]
+ :rtype: List[str]
+
+ """
+ return arg.split('=', 1)
+
def _process_protocol_v2(self, argv, ifile, ofile):
""" Processes records on the `input stream optionally writing records to the output stream.
@@ -645,7 +681,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Reading metadata')
- metadata, body = self._read_chunk(ifile)
+ metadata, body = self._read_chunk(self._as_binary_stream(ifile))
action = getattr(metadata, 'action', None)
@@ -693,11 +729,12 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if args and type(args) == list:
for arg in args:
- result = arg.split('=', 1)
+ result = self._protocol_v2_option_parser(arg)
if len(result) == 1:
- self.fieldnames.append(result[0])
+ self.fieldnames.append(str(result[0]))
else:
name, value = result
+ name = str(name)
try:
option = self.options[name]
except KeyError:
@@ -723,7 +760,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
if error_count > 0:
exit(1)
- debug(' command: %s', unicode(self))
+ debug(' command: %s', six.text_type(self))
debug('Preparing for execution')
self.prepare()
@@ -741,7 +778,7 @@ def _process_protocol_v2(self, argv, ifile, ofile):
setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])
metadata = MetadataEncoder().encode(self._metadata)
- ifile.record('chunked 1.0,', unicode(len(metadata)), ',0\n', metadata)
+ ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata)
if self.show_configuration:
self.write_info(self.name + ' command configuration: ' + str(self._configuration))
@@ -764,7 +801,6 @@ def _process_protocol_v2(self, argv, ifile, ofile):
# noinspection PyBroadException
try:
debug('Executing under protocol_version=2')
- self._records = self._records_protocol_v2
self._metadata.action = 'execute'
self._execute(ifile, None)
except SystemExit:
@@ -798,15 +834,15 @@ def write_metric(self, name, value):
:param name: Name of the metric.
:type name: basestring
- :param value: A 4-tuple containing the value of metric :param:`name` where
+ :param value: A 4-tuple containing the value of metric ``name`` where
value[0] = Elapsed seconds or :const:`None`.
value[1] = Number of invocations or :const:`None`.
value[2] = Input count or :const:`None`.
value[3] = Output count or :const:`None`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
- The :data:`SearchMetric` type provides a convenient encapsulation of :param:`value`.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
+ The :data:`SearchMetric` type provides a convenient encapsulation of ``value``.
:return: :const:`None`.
@@ -821,6 +857,8 @@ def _decode_list(mv):
_encoded_value = re.compile(r'\$(?P<item>(?:\$\$|[^$])*)\$(?:;|$)') # matches a single value in an encoded list
+ # Note: Subclasses must override this method so that it can be called
+ # as self._execute(ifile, None)
def _execute(self, ifile, process):
""" Default processing loop
@@ -834,22 +872,38 @@ def _execute(self, ifile, process):
:rtype: NoneType
"""
- self._record_writer.write_records(process(self._records(ifile)))
- self.finish()
+ if self.protocol_version == 1:
+ self._record_writer.write_records(process(self._records(ifile)))
+ self.finish()
+ else:
+ assert self._protocol_version == 2
+ self._execute_v2(ifile, process)
@staticmethod
- def _read_chunk(ifile):
+ def _as_binary_stream(ifile):
+ naught = ifile.read(0)
+ if isinstance(naught, bytes):
+ return ifile
+
+ try:
+ return ifile.buffer
+ except AttributeError as error:
+ raise RuntimeError('Failed to get underlying buffer: {}'.format(error))
+ @staticmethod
+ def _read_chunk(istream):
# noinspection PyBroadException
+ assert isinstance(istream.read(0), six.binary_type), 'Stream must be binary'
+
try:
- header = ifile.readline()
+ header = istream.readline()
except Exception as error:
raise RuntimeError('Failed to read transport header: {}'.format(error))
if not header:
return None
- match = SearchCommand._header.match(header)
+ match = SearchCommand._header.match(six.ensure_str(header))
if match is None:
raise RuntimeError('Failed to parse transport header: {}'.format(header))
@@ -859,35 +913,39 @@ def _read_chunk(ifile):
body_length = int(body_length)
try:
- metadata = ifile.read(metadata_length)
+ metadata = istream.read(metadata_length)
except Exception as error:
raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error))
decoder = MetadataDecoder()
try:
- metadata = decoder.decode(metadata)
+ metadata = decoder.decode(six.ensure_str(metadata))
except Exception as error:
raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error))
# if body_length <= 0:
# return metadata, ''
+ body = ""
try:
- body = ifile.read(body_length)
+ if body_length > 0:
+ body = istream.read(body_length)
except Exception as error:
raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error))
- return metadata, body
+ return metadata, six.ensure_str(body)
_header = re.compile(r'chunked\s+1.0\s*,\s*(\d+)\s*,\s*(\d+)\s*\n')
def _records_protocol_v1(self, ifile):
+ return self._read_csv_records(ifile)
+ def _read_csv_records(self, ifile):
reader = csv.reader(ifile, dialect=CsvDialect)
try:
- fieldnames = reader.next()
+ fieldnames = next(reader)
except StopIteration:
return
@@ -908,51 +966,37 @@ def _records_protocol_v1(self, ifile):
record[fieldname] = value
yield record
- def _records_protocol_v2(self, ifile):
+ def _execute_v2(self, ifile, process):
+ istream = self._as_binary_stream(ifile)
while True:
- result = self._read_chunk(ifile)
+ result = self._read_chunk(istream)
if not result:
return
metadata, body = result
action = getattr(metadata, 'action', None)
-
if action != 'execute':
raise RuntimeError('Expected execute action, not {}'.format(action))
- finished = getattr(metadata, 'finished', False)
+ self._finished = getattr(metadata, 'finished', False)
self._record_writer.is_flushed = False
- if len(body) > 0:
- reader = csv.reader(StringIO(body), dialect=CsvDialect)
+ self._execute_chunk_v2(process, result)
- try:
- fieldnames = reader.next()
- except StopIteration:
- return
+ self._record_writer.write_chunk(finished=self._finished)
- mv_fieldnames = dict([(name, name[len('__mv_'):]) for name in fieldnames if name.startswith('__mv_')])
+ def _execute_chunk_v2(self, process, chunk):
+ metadata, body = chunk
- if len(mv_fieldnames) == 0:
- for values in reader:
- yield OrderedDict(izip(fieldnames, values))
- else:
- for values in reader:
- record = OrderedDict()
- for fieldname, value in izip(fieldnames, values):
- if fieldname.startswith('__mv_'):
- if len(value) > 0:
- record[mv_fieldnames[fieldname]] = self._decode_list(value)
- elif fieldname not in record:
- record[fieldname] = value
- yield record
-
- if finished:
- return
+ if len(body) <= 0 and not self._allow_empty_input:
+ raise ValueError(
+ "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
+ "with empty records.")
- self.flush()
+ records = self._read_csv_records(StringIO(body))
+ self._record_writer.write_records(process(records))
def _report_unexpected_error(self):
@@ -1003,7 +1047,8 @@ def __str__(self):
:return: String representation of this instance
"""
- text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
+ #text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
+ text = ', '.join(['{}={}'.format(name, json_encode_string(six.text_type(value))) for (name, value) in six.iteritems(self)])
return text
# region Methods
@@ -1022,23 +1067,29 @@ def fix_up(cls, command_class):
"""
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
definitions = type(self).configuration_setting_definitions
version = self.command.protocol_version
return ifilter(
- lambda (name, value): value is not None, imap(
+ lambda name_value1: name_value1[1] is not None, imap(
lambda setting: (setting.name, setting.__get__(self)), ifilter(
lambda setting: setting.is_supported_by_protocol(version), definitions)))
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
pass # endregion
pass # endregion
-SearchMetric = namedtuple(b'SearchMetric', (b'elapsed_seconds', b'invocation_count', b'input_count', b'output_count'))
+SearchMetric = namedtuple('SearchMetric', ('elapsed_seconds', 'invocation_count', 'input_count', 'output_count'))
-def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None):
+def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys.stdout, module_name=None, allow_empty_input=True):
""" Instantiates and executes a search command class
This function implements a conditional script stanza based on the value of
@@ -1061,11 +1112,13 @@ def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=sys
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`basestring`
+ :param allow_empty_input: Allow empty input records for the command; if False, an error is raised when an empty chunk body is read
+ :type allow_empty_input: bool
:returns: :const:`None`
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
#!/usr/bin/env python
@@ -1081,7 +1134,7 @@ def stream(records):
**Example**
- .. code-block:: python
+ .. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@@ -1098,4 +1151,4 @@ def stream(records):
assert issubclass(command_class, SearchCommand)
if module_name is None or module_name == '__main__':
- command_class().process(argv, input_file, output_file)
+ command_class().process(argv, input_file, output_file, allow_empty_input)
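A minimal sketch of how a command built on this library might use the new dispatch signature and the add_field helper introduced above; the command and field names are illustrative and not part of TA-dmarc.

import sys
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

@Configuration()
class EchoCommand(StreamingCommand):
    """Illustrative streaming command that tags every record it sees."""

    def stream(self, records):
        for record in records:
            # add_field registers the field with the record writer so it is
            # written out even if it was absent from the input chunk
            self.add_field(record, 'echoed', 'true')
            yield record

# Fail loudly instead of succeeding silently when splunkd sends an empty chunk body
dispatch(EchoCommand, sys.argv, sys.stdin, sys.stdout, __name__, allow_empty_input=False)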
diff --git a/bin/ta_dmarc/splunklib/searchcommands/streaming_command.py b/bin/ta_dmarc/splunklib/searchcommands/streaming_command.py
index 12e9f03..fa075ed 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/streaming_command.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/streaming_command.py
@@ -16,7 +16,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-from itertools import ifilter, imap
+from splunklib import six
+from splunklib.six.moves import map as imap, filter as ifilter
from .decorators import ConfigurationSetting
from .search_command import SearchCommand
@@ -172,17 +173,23 @@ def fix_up(cls, command):
raise AttributeError('No StreamingCommand.stream override')
return
+ # TODO: Stop looking like a dictionary because we don't obey the semantics
+ # N.B.: Does not use Python 2 dict copy semantics
def iteritems(self):
iteritems = SearchCommand.ConfigurationSettings.iteritems(self)
version = self.command.protocol_version
if version == 1:
if self.required_fields is None:
- iteritems = ifilter(lambda (name, value): name != 'clear_required_fields', iteritems)
+ iteritems = ifilter(lambda name_value: name_value[0] != 'clear_required_fields', iteritems)
else:
- iteritems = ifilter(lambda (name, value): name != 'distributed', iteritems)
- if self.distributed:
+ iteritems = ifilter(lambda name_value2: name_value2[0] != 'distributed', iteritems)
+ if not self.distributed:
iteritems = imap(
- lambda (name, value): (name, 'stateful') if name == 'type' else (name, value), iteritems)
+ lambda name_value1: (name_value1[0], 'stateful') if name_value1[0] == 'type' else (name_value1[0], name_value1[1]), iteritems)
return iteritems
+ # N.B.: Does not use Python 3 dict view semantics
+ if not six.PY2:
+ items = iteritems
+
# endregion
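The corrected condition above means that, under protocol v2, a streaming command declared with distributed=False is now reported to splunkd as type='stateful'. A hedged sketch of such a declaration (names illustrative only):

import sys
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration

@Configuration(distributed=False)
class NumberRowsCommand(StreamingCommand):
    """Illustrative stateful command; it must see the whole stream in one place."""

    def stream(self, records):
        row = 0
        for record in records:
            row += 1
            record['row'] = row
            yield record

dispatch(NumberRowsCommand, sys.argv, sys.stdin, sys.stdout, __name__)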
diff --git a/bin/ta_dmarc/splunklib/searchcommands/validators.py b/bin/ta_dmarc/splunklib/searchcommands/validators.py
index 9b9fee3..22f0e16 100644
--- a/bin/ta_dmarc/splunklib/searchcommands/validators.py
+++ b/bin/ta_dmarc/splunklib/searchcommands/validators.py
@@ -18,11 +18,13 @@
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
-from cStringIO import StringIO
+from splunklib.six.moves import StringIO
from io import open
import csv
import os
import re
+from splunklib import six
+from splunklib.six.moves import getcwd
class Validator(object):
@@ -58,7 +60,7 @@ class Boolean(Validator):
def __call__(self, value):
if not (value is None or isinstance(value, bool)):
- value = unicode(value).lower()
+ value = six.text_type(value).lower()
if value not in Boolean.truth_values:
raise ValueError('Unrecognized truth value: {0}'.format(value))
value = Boolean.truth_values[value]
@@ -79,9 +81,9 @@ class Code(Validator):
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
- sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
- consists of a single interactive statement. In the latter case, expression statements that evaluate to
- something other than :const:`None` will be printed.
+ sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
+ consists of a single interactive statement. In the latter case, expression statements that evaluate to
+ something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
@@ -91,14 +93,16 @@ def __call__(self, value):
if value is None:
return None
try:
- return Code.object(compile(value, 'string', self._mode), unicode(value))
+ return Code.object(compile(value, 'string', self._mode), six.text_type(value))
except (SyntaxError, TypeError) as error:
- raise ValueError(error.message)
+ message = str(error)
+
+ six.raise_from(ValueError(message), error)
def format(self, value):
return None if value is None else value.source
- object = namedtuple(b'Code', (b'object', 'source'))
+ object = namedtuple('Code', ('object', 'source'))
class Fieldname(Validator):
@@ -109,7 +113,7 @@ class Fieldname(Validator):
def __call__(self, value):
if value is not None:
- value = unicode(value)
+ value = six.text_type(value)
if Fieldname.pattern.match(value) is None:
raise ValueError('Illegal characters in fieldname: {}'.format(value))
return value
@@ -132,7 +136,7 @@ def __call__(self, value):
if value is None:
return value
- path = unicode(value)
+ path = six.text_type(value)
if not os.path.isabs(path):
path = os.path.join(self.directory, path)
@@ -149,7 +153,7 @@ def format(self, value):
return None if value is None else value.name
_var_run_splunk = os.path.join(
- os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else os.getcwdu(), 'var', 'run', 'splunk')
+ os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else getcwd(), 'var', 'run', 'splunk')
class Integer(Validator):
@@ -183,7 +187,10 @@ def __call__(self, value):
if value is None:
return None
try:
- value = long(value)
+ if six.PY2:
+ value = long(value)
+ else:
+ value = int(value)
except ValueError:
raise ValueError('Expected integer value, not {}'.format(json_encode_string(value)))
@@ -191,7 +198,49 @@ def __call__(self, value):
return value
def format(self, value):
- return None if value is None else unicode(long(value))
+ return None if value is None else six.text_type(int(value))
+
+
+class Float(Validator):
+ """ Validates float option values.
+
+ """
+ def __init__(self, minimum=None, maximum=None):
+ if minimum is not None and maximum is not None:
+ def check_range(value):
+ if not (minimum <= value <= maximum):
+ raise ValueError('Expected float in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
+ return
+ elif minimum is not None:
+ def check_range(value):
+ if value < minimum:
+ raise ValueError('Expected float in the range [{0},+∞], not {1}'.format(minimum, value))
+ return
+ elif maximum is not None:
+ def check_range(value):
+ if value > maximum:
+ raise ValueError('Expected float in the range [-∞,{0}], not {1}'.format(maximum, value))
+ return
+ else:
+ def check_range(value):
+ return
+
+ self.check_range = check_range
+ return
+
+ def __call__(self, value):
+ if value is None:
+ return None
+ try:
+ value = float(value)
+ except ValueError:
+ raise ValueError('Expected float value, not {}'.format(json_encode_string(value)))
+
+ self.check_range(value)
+ return value
+
+ def format(self, value):
+ return None if value is None else six.text_type(float(value))
class Duration(Validator):
@@ -244,10 +293,10 @@ class List(Validator):
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
- delimiter = b','
- quotechar = b'"'
+ delimiter = str(',')
+ quotechar = str('"')
doublequote = True
- lineterminator = b'\n'
+ lineterminator = str('\n')
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
@@ -262,7 +311,7 @@ def __call__(self, value):
return value
try:
- value = csv.reader([value], self.Dialect).next()
+ value = next(csv.reader([value], self.Dialect))
except csv.Error as error:
raise ValueError(error)
@@ -297,7 +346,7 @@ def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {0}'.format(value))
@@ -305,7 +354,7 @@ def __call__(self, value):
return self.membership[value]
def format(self, value):
- return None if value is None else self.membership.keys()[self.membership.values().index(value)]
+ return None if value is None else list(self.membership.keys())[list(self.membership.values()).index(value)]
class Match(Validator):
@@ -313,19 +362,19 @@ class Match(Validator):
"""
def __init__(self, name, pattern, flags=0):
- self.name = unicode(name)
+ self.name = six.text_type(name)
self.pattern = re.compile(pattern, flags)
def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if self.pattern.match(value) is None:
raise ValueError('Expected {}, not {}'.format(self.name, json_encode_string(value)))
return value
def format(self, value):
- return None if value is None else unicode(value)
+ return None if value is None else six.text_type(value)
class OptionName(Validator):
@@ -336,13 +385,13 @@ class OptionName(Validator):
def __call__(self, value):
if value is not None:
- value = unicode(value)
+ value = six.text_type(value)
if OptionName.pattern.match(value) is None:
raise ValueError('Illegal characters in option name: {}'.format(value))
return value
def format(self, value):
- return None if value is None else unicode(value)
+ return None if value is None else six.text_type(value)
class RegularExpression(Validator):
@@ -353,9 +402,9 @@ def __call__(self, value):
if value is None:
return None
try:
- value = re.compile(unicode(value))
+ value = re.compile(six.text_type(value))
except re.error as error:
- raise ValueError('{}: {}'.format(unicode(error).capitalize(), value))
+ raise ValueError('{}: {}'.format(six.text_type(error).capitalize(), value))
return value
def format(self, value):
@@ -372,7 +421,7 @@ def __init__(self, *args):
def __call__(self, value):
if value is None:
return None
- value = unicode(value)
+ value = six.text_type(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {}'.format(value))
return value
@@ -381,4 +430,4 @@ def format(self, value):
return self.__call__(value)
-__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
+__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'Float', 'List', 'Map', 'RegularExpression', 'Set']
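The new Float validator added above can be attached to command options the same way the existing validators are. A minimal sketch, with illustrative command and option names:

import sys
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators

@Configuration()
class ScaleCommand(StreamingCommand):
    """Illustrative command: multiplies a numeric field by a float option."""

    factor = Option(default=1.0, validate=validators.Float(minimum=0.0))

    def stream(self, records):
        for record in records:
            record['scaled'] = float(record.get('value', 0)) * self.factor
            yield record

dispatch(ScaleCommand, sys.argv, sys.stdin, sys.stdout, __name__)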
diff --git a/bin/ta_dmarc/splunklib/six.py b/bin/ta_dmarc/splunklib/six.py
new file mode 100644
index 0000000..d13e50c
--- /dev/null
+++ b/bin/ta_dmarc/splunklib/six.py
@@ -0,0 +1,993 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson "
+__version__ = "1.14.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+ with Python 2.5 and all existing versions of Python3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+ Required, if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    # This does exactly what the :func:`py3:functools.update_wrapper`
+    # function does on Python versions after 3.2. It sets the ``__wrapped__``
+    # attribute on the ``wrapper`` object and doesn't raise an error if any of
+    # the attributes mentioned in ``assigned`` and ``updated`` are missing on
+    # the ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+        # inserted an importer with a different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
+
+import warnings
+
+def deprecated(message):
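+    """Mark a function as deprecated.
+
+    Returns a decorator; each call to the decorated function emits a
+    DeprecationWarning that combines the function name with ``message``,
+    then falls through to the original function.
+    """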
+ def deprecated_decorator(func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn("{} is a deprecated function. {}".format(func.__name__, message),
+ category=DeprecationWarning,
+ stacklevel=2)
+ warnings.simplefilter('default', DeprecationWarning)
+ return func(*args, **kwargs)
+ return deprecated_func
+ return deprecated_decorator
\ No newline at end of file
diff --git a/default/app.conf b/default/app.conf
index 24e5d5d..88d828e 100644
--- a/default/app.conf
+++ b/default/app.conf
@@ -7,7 +7,7 @@ build = 1
[launcher]
author = Jorrit Folmer
-version = 4.1.0
+version = 4.1.1
description = Add-on for ingesting DMARC aggregate reports into Splunk
[ui]