Yes, I'm quite willing to share the simple script I wrote. Keep in mind that it is not a general solution, but addressed my needs in a very specific Jenkins setup. The Python script relies on the psycopg2 library to talk to Postgres and the jenkinsapi package to talk to Jenkins.
Put all attached files in the same directory. First, you will need to create your Postgres database. The SQL statements in jenkins_schema.sql should do this for you if you run it via psql. You will certainly want to modify TEST_JOB_RE to match the job names you're interested in or simply remove the job name check to get all of them. You will need to modify the Postgres username and password hard coded in update_build_db.py and the Jenkins URL and credentials in jenkins.ini. Then, you should be able to run update_build_db.py as a script to update your Postgres db with job and build information from Jenkins. Ballantyne, Bob wrote: > This sounds remarkably similar to a task on my current 'to-do' list. > Even the PostgresQL db and the analysts use case. I had intended > using Groovy. > > Any chance if posting this somewhere on the Jenkins plugin site, or on > Scriptler? I'm sure there are probably many others who would be VERY > interested. > > > On Jul 10, 2013, at 5:29 PM, "JonathanRRogers" > <jonathanrrog...@gmail.com <mailto:jonathanrrog...@gmail.com>> wrote: > >> >> >> On Friday, June 28, 2013 4:14:46 PM UTC-4, Michael Barbine wrote: >> >> Hello! >> >> I'm looking for some input on a proven way to put all of the data >> contained in the Jenkins XML or API into a RDBMS. Doesn't matter >> which one... I can move it around later. >> >> >> What do you mean by "all the data"? More importantly, what is the >> ultimate goal? I wrote a script which extracts job and build >> information via the Jenkins API and inserts/updates a PostgreSQL >> database so I could analyze it more easily. Is that similar to what >> you're trying to do? >> -- >> You received this message because you are subscribed to the Google >> Groups "Jenkins Users" group. >> To unsubscribe from this group and stop receiving emails from it, >> send an email to jenkinsci-users+unsubscr...@googlegroups.com >> <mailto:jenkinsci-users+unsubscr...@googlegroups.com>. 
>> For more options, visit https://groups.google.com/groups/opt_out. >> >> > -- > You received this message because you are subscribed to a topic in the > Google Groups "Jenkins Users" group. > To unsubscribe from this topic, visit > https://groups.google.com/d/topic/jenkinsci-users/wHxZ9dm-JE4/unsubscribe. > To unsubscribe from this group and all its topics, send an email to > jenkinsci-users+unsubscr...@googlegroups.com. > For more options, visit https://groups.google.com/groups/opt_out. > > -- Jonathan Rogers -- You received this message because you are subscribed to the Google Groups "Jenkins Users" group. To unsubscribe from this group and stop receiving emails from it, send an email to jenkinsci-users+unsubscr...@googlegroups.com. For more options, visit https://groups.google.com/groups/opt_out.
--
-- Schema for mirroring Jenkins job, build and test-result data
-- (originally produced by pg_dump).
--

SET statement_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SET check_function_bodies = false;
SET client_min_messages = warning;

-- Procedural-language extension (present in stock pg_dump output).
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';

SET search_path = public, pg_catalog;
SET default_tablespace = '';
SET default_with_oids = false;

-- One row per Jenkins build; keyed by (job name, build number).
CREATE TABLE build (
    job       text NOT NULL,
    id        bigint NOT NULL,
    duration  interval NOT NULL,
    began     timestamp with time zone NOT NULL,
    status    text NOT NULL,
    node_name text NOT NULL
);

ALTER TABLE public.build OWNER TO social;

-- One row per Jenkins job; branch is extracted from the job name.
CREATE TABLE job (
    job    text NOT NULL,
    branch text NOT NULL
);

ALTER TABLE public.job OWNER TO social;

-- One row per test case reported for a build.
CREATE TABLE test (
    id              bigint NOT NULL,
    job             text NOT NULL,
    build_id        bigint,
    classname       text NOT NULL,
    name            text NOT NULL,
    duration        interval NOT NULL,
    errordetails    text,
    errorstacktrace text,
    status          text NOT NULL,
    stdout          text,
    stderr          text
);

ALTER TABLE public.test OWNER TO social;

-- Surrogate key for the test table.
CREATE SEQUENCE test_id_seq
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;

ALTER TABLE public.test_id_seq OWNER TO social;

ALTER SEQUENCE test_id_seq OWNED BY test.id;

ALTER TABLE ONLY test
    ALTER COLUMN id SET DEFAULT nextval('test_id_seq'::regclass);

-- Primary keys.
ALTER TABLE ONLY build
    ADD CONSTRAINT build_pkey PRIMARY KEY (job, id);

ALTER TABLE ONLY job
    ADD CONSTRAINT job_pkey PRIMARY KEY (job);

ALTER TABLE ONLY test
    ADD CONSTRAINT test_pkey PRIMARY KEY (id);

-- Secondary indexes for the analysis queries.
CREATE INDEX job_id ON test USING btree (job, id);

CREATE INDEX test_name ON test USING btree (classname, name);

CREATE INDEX test_status ON test USING btree (status);

-- Deleting a build removes its test rows as well.
ALTER TABLE ONLY test
    ADD CONSTRAINT test_job_fkey
    FOREIGN KEY (job, build_id)
    REFERENCES build(job, id)
    ON DELETE CASCADE;

-- Standard public-schema privileges from the dump.
REVOKE ALL ON SCHEMA public FROM PUBLIC;
REVOKE ALL ON SCHEMA public FROM postgres;
GRANT ALL ON SCHEMA public TO postgres;
GRANT ALL ON SCHEMA public TO PUBLIC;
#!/usr/bin/python
"""Mirror Jenkins job, build and test-result data into PostgreSQL.

Walks every Jenkins job whose name matches TEST_JOB_RE, inserts a row
into the ``job`` table for any job not yet recorded, then inserts a
``build`` row (and one ``test`` row per reported test case) for every
build not already in the database.  ``--replace`` deletes and
re-inserts builds that are already recorded; ``--dry-run`` rolls the
transaction back instead of committing.
"""
from datetime import datetime
import psycopg2
import logging
from argparse import ArgumentParser

from jenkins_config import (
    get_jenkins_instance,
    tests_template_name,
    TEST_JOB_RE,
)


def _ensure_job_recorded(cur, name, branch):
    """Insert a row into the job table unless one already exists for *name*."""
    cur.execute('select job from job where job=%s', (name,))
    if not cur.fetchone():
        logging.info('inserting job: %s', name)
        cur.execute('insert into job (job, branch) values (%s, %s)',
                    (name, branch))


def _insert_build(cur, name, build_id, build):
    """Insert one row into the build table for a single Jenkins build."""
    cur.execute("""
        insert into build (job, id, duration, began, status, node_name)
        values (%s, %s, (%s || ' milliseconds')::interval,
                timestamptz 'epoch' + %s * interval '1 millisecond',
                %s, %s)
        """, (name, build_id, build.get_duration(), build.get_timestamp(),
              build.get_status(),
              # NOTE(review): _data is a private jenkinsapi attribute;
              # 'builtOn' appears to be empty for builds run on the
              # master node -- confirm against the installed jenkinsapi.
              build._data['builtOn'] or 'master'))


def _insert_test_results(cur, name, build_id, build):
    """Insert one test row per test case reported for *build*, if any."""
    if not build.has_resultset():
        return
    # iteritems() is the Python 2 jenkinsapi ResultSet API.
    for _, result in build.get_resultset().iteritems():
        logging.debug('result==%r', result)
        cur.execute("""
            insert into test (job, build_id, className, name, duration,
                errorDetails, errorStackTrace, status, stdout, stderr)
            values (%s, %s, %s, %s, (%s || ' seconds')::interval,
                %s, %s, %s, %s, %s)
            """, (name, build_id, result.className, result.name,
                  result.duration, result.errorDetails,
                  result.errorStackTrace, result.status,
                  result.stdout, result.stderr))


def main(conn, dry_run, replace):
    """Synchronize the database behind *conn* with Jenkins.

    :param conn: open psycopg2 connection; the CALLER commits or rolls
        back, which is how --dry-run is actually implemented.
    :param dry_run: accepted for interface compatibility but unused
        here -- dry-run behaviour lives in the caller's rollback.
    :param replace: when true, delete and re-insert builds that are
        already recorded instead of skipping them (the delete cascades
        to their test rows).
    """
    cur = conn.cursor()
    j = get_jenkins_instance()
    for name, job in j.get_jobs():
        match = TEST_JOB_RE.match(name)
        if not match:
            continue
        _ensure_job_recorded(cur, name, match.group(1))
        cur.execute('select id from build where job=%s', (name,))
        existing_ids = set(r[0] for r in cur.fetchall())
        for build_id in job.get_build_ids():
            if build_id in existing_ids:
                if replace:
                    logging.debug('deleting %s build %d', name, build_id)
                    cur.execute("delete from build where job=%s and id=%s",
                                (name, build_id))
                else:
                    logging.debug('Skipping job %s: %d', name, build_id)
                    continue
            logging.info('Inserting job %s: %d', name, build_id)
            build = job.get_build(build_id)
            _insert_build(cur, name, build_id, build)
            _insert_test_results(cur, name, build_id, build)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('--dry-run', action='store_true')
    parser.add_argument('--replace', action='store_true')
    parser.add_argument('--verbose', '-v', default=0, action='count')
    args = parser.parse_args()
    # Each -v raises verbosity one level: WARNING -> INFO -> DEBUG.
    logging.basicConfig(level=logging.WARNING - args.verbose * 10)
    conn = psycopg2.connect("dbname=jenkins user=social")
    main(conn, args.dry_run, args.replace)
    if args.dry_run:
        conn.rollback()
    else:
        conn.commit()
"""Configuration and shared helpers for the Jenkins build-database scripts.

Reads the virtualenv names from ~/virtualenv_names at import time to
build TEST_JOB_RE; Jenkins URL and credentials come from ~/jenkins.ini.
"""
import logging
import re
from ConfigParser import SafeConfigParser
from os.path import expanduser, exists

from jenkinsapi import jenkins


def strings_from_file(file_name):
    """Return the stripped, non-empty lines of *file_name* as a list."""
    # Context manager closes the handle promptly; the original left an
    # open file to the garbage collector.
    with open(file_name) as handle:
        return [line.strip() for line in handle if line.strip()]


virtualenvs = strings_from_file(expanduser('~/virtualenv_names'))

trigger_job_name = 'trigger_builds'
tests_template_name = 'build_tests_template'

# Matches job names of the form "<branch>(<virtualenv>)".
# NOTE(review): virtualenv names are interpolated unescaped, so a name
# containing regex metacharacters would alter the pattern; consider
# re.escape() if such names can occur.
TEST_JOB_RE = re.compile(r'(.+)\((%s)\)' %
                         '|'.join('(%s)' % vn for vn in virtualenvs))

TRIGGER_REPO = expanduser('~/jobs/trigger_builds/workspace')
BRANCHES_TO_BUILD_FILE = expanduser('~/branches_to_build')


def branches_to_build():
    """Return the branch names listed in BRANCHES_TO_BUILD_FILE."""
    return strings_from_file(BRANCHES_TO_BUILD_FILE)


def get_jenkins_instance():
    """Build a jenkinsapi Jenkins object from the ~/jenkins.ini credentials."""
    parser = SafeConfigParser()
    parser.read(expanduser('~/jenkins.ini'))
    url = parser.get('main', 'jenkins_url')
    user = parser.get('main', 'jenkins_user')
    password = parser.get('main', 'jenkins_password')
    # NOTE(review): this logs the Jenkins password at DEBUG level --
    # mask it before enabling debug logging anywhere sensitive.
    logging.debug("jenkins.Jenkins(%r, username=%r, password=%r)",
                  url, user, password)
    return jenkins.Jenkins(url, username=user, password=password)


def read_property(propfile, propname):
    """Return the value of the first ``name=value`` line in *propfile*
    whose name starts with *propname*, or None when the file or the
    property is missing.

    NOTE(review): startswith() also matches longer names sharing the
    prefix (e.g. propname 'foo' matches a 'foobar=1' line) -- confirm
    the property files never contain such collisions.
    """
    if exists(propfile):
        with open(propfile) as handle:
            for line in handle:
                if line.startswith(propname):
                    # Split on the FIRST '=' only, so values may
                    # themselves contain '=' (the original split on
                    # every '=' and truncated such values).
                    return line.split('=', 1)[1].strip()


def get_branch_property(branch, propname):
    """Look up *propname* for *branch*, falling back to the defaults file."""
    for propfile in ('pippin/jenkins/branch/%s.properties' % branch,
                     'pippin/jenkins/defaults.properties'):
        value = read_property(propfile, propname)
        if value is not None:
            return value
jenkins.ini
Description: application/wine-extension-ini