package tfe import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "math/rand" "net/http" "net/url" "os" "reflect" "strconv" "strings" "time" "github.com/google/go-querystring/query" "github.com/hashicorp/go-cleanhttp" retryablehttp "github.com/hashicorp/go-retryablehttp" "github.com/svanharmelen/jsonapi" "golang.org/x/time/rate" ) const ( userAgent = "go-tfe" headerRateLimit = "X-RateLimit-Limit" headerRateReset = "X-RateLimit-Reset" // DefaultAddress of Terraform Enterprise. DefaultAddress = "https://app.terraform.io" // DefaultBasePath on which the API is served. DefaultBasePath = "/api/v2/" ) var ( // ErrWorkspaceLocked is returned when trying to lock a // locked workspace. ErrWorkspaceLocked = errors.New("workspace already locked") // ErrWorkspaceNotLocked is returned when trying to unlock // a unlocked workspace. ErrWorkspaceNotLocked = errors.New("workspace already unlocked") // ErrUnauthorized is returned when a receiving a 401. ErrUnauthorized = errors.New("unauthorized") // ErrResourceNotFound is returned when a receiving a 404. ErrResourceNotFound = errors.New("resource not found") ) // RetryLogHook allows a function to run before each retry. type RetryLogHook func(attemptNum int, resp *http.Response) // Config provides configuration details to the API client. type Config struct { // The address of the Terraform Enterprise API. Address string // The base path on which the API is served. BasePath string // API token used to access the Terraform Enterprise API. Token string // Headers that will be added to every request. Headers http.Header // A custom HTTP client to use. HTTPClient *http.Client // RetryLogHook is invoked each time a request is retried. RetryLogHook RetryLogHook } // DefaultConfig returns a default config structure. func DefaultConfig() *Config { config := &Config{ Address: os.Getenv("TFE_ADDRESS"), BasePath: DefaultBasePath, Token: os.Getenv("TFE_TOKEN"), Headers: make(http.Header), HTTPClient: cleanhttp.DefaultPooledClient(), } // Set the default address if none is given. if config.Address == "" { config.Address = DefaultAddress } // Set the default user agent. config.Headers.Set("User-Agent", userAgent) return config } // Client is the Terraform Enterprise API client. It provides the basic // connectivity and configuration for accessing the TFE API. type Client struct { baseURL *url.URL token string headers http.Header http *retryablehttp.Client limiter *rate.Limiter retryLogHook RetryLogHook retryServerErrors bool Applies Applies ConfigurationVersions ConfigurationVersions OAuthClients OAuthClients OAuthTokens OAuthTokens Organizations Organizations OrganizationTokens OrganizationTokens Plans Plans Policies Policies PolicyChecks PolicyChecks PolicySets PolicySets Runs Runs SSHKeys SSHKeys StateVersions StateVersions Teams Teams TeamAccess TeamAccesses TeamMembers TeamMembers TeamTokens TeamTokens Users Users Variables Variables Workspaces Workspaces } // NewClient creates a new Terraform Enterprise API client. func NewClient(cfg *Config) (*Client, error) { config := DefaultConfig() // Layer in the provided config for any non-blank values. if cfg != nil { if cfg.Address != "" { config.Address = cfg.Address } if cfg.BasePath != "" { config.BasePath = cfg.BasePath } if cfg.Token != "" { config.Token = cfg.Token } for k, v := range cfg.Headers { config.Headers[k] = v } if cfg.HTTPClient != nil { config.HTTPClient = cfg.HTTPClient } if cfg.RetryLogHook != nil { config.RetryLogHook = cfg.RetryLogHook } } // Parse the address to make sure its a valid URL. 
baseURL, err := url.Parse(config.Address) if err != nil { return nil, fmt.Errorf("invalid address: %v", err) } baseURL.Path = config.BasePath if !strings.HasSuffix(baseURL.Path, "/") { baseURL.Path += "/" } // This value must be provided by the user. if config.Token == "" { return nil, fmt.Errorf("missing API token") } // Create the client. client := &Client{ baseURL: baseURL, token: config.Token, headers: config.Headers, retryLogHook: config.RetryLogHook, } client.http = &retryablehttp.Client{ Backoff: client.retryHTTPBackoff, CheckRetry: client.retryHTTPCheck, ErrorHandler: retryablehttp.PassthroughErrorHandler, HTTPClient: config.HTTPClient, RetryWaitMin: 100 * time.Millisecond, RetryWaitMax: 400 * time.Millisecond, RetryMax: 30, } // Configure the rate limiter. if err := client.configureLimiter(); err != nil { return nil, err } // Create the services. client.Applies = &applies{client: client} client.ConfigurationVersions = &configurationVersions{client: client} client.OAuthClients = &oAuthClients{client: client} client.OAuthTokens = &oAuthTokens{client: client} client.Organizations = &organizations{client: client} client.OrganizationTokens = &organizationTokens{client: client} client.Plans = &plans{client: client} client.Policies = &policies{client: client} client.PolicyChecks = &policyChecks{client: client} client.PolicySets = &policySets{client: client} client.Runs = &runs{client: client} client.SSHKeys = &sshKeys{client: client} client.StateVersions = &stateVersions{client: client} client.Teams = &teams{client: client} client.TeamAccess = &teamAccesses{client: client} client.TeamMembers = &teamMembers{client: client} client.TeamTokens = &teamTokens{client: client} client.Users = &users{client: client} client.Variables = &variables{client: client} client.Workspaces = &workspaces{client: client} return client, nil } // RetryServerErrors configures the retry HTTP check to also retry // unexpected errors or requests that failed with a server error. func (c *Client) RetryServerErrors(retry bool) { c.retryServerErrors = retry } // retryHTTPCheck provides a callback for Client.CheckRetry which // will retry both rate limit (429) and server (>= 500) errors. func (c *Client) retryHTTPCheck(ctx context.Context, resp *http.Response, err error) (bool, error) { if ctx.Err() != nil { return false, ctx.Err() } if err != nil { return c.retryServerErrors, err } if resp.StatusCode == 429 || (c.retryServerErrors && resp.StatusCode >= 500) { return true, nil } return false, nil } // retryHTTPBackoff provides a generic callback for Client.Backoff which // will pass through all calls based on the status code of the response. func (c *Client) retryHTTPBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { if c.retryLogHook != nil { c.retryLogHook(attemptNum, resp) } // Use the rate limit backoff function when we are rate limited. if resp.StatusCode == 429 { return rateLimitBackoff(min, max, attemptNum, resp) } // Set custom duration's when we experience a service interruption. min = 700 * time.Millisecond max = 900 * time.Millisecond return retryablehttp.LinearJitterBackoff(min, max, attemptNum, resp) } // rateLimitBackoff provides a callback for Client.Backoff which will use the // X-RateLimit_Reset header to determine the time to wait. We add some jitter // to prevent a thundering herd. // // min and max are mainly used for bounding the jitter that will be added to // the reset time retrieved from the headers. 
But if the final wait time is // less then min, min will be used instead. func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { // rnd is used to generate pseudo-random numbers. rnd := rand.New(rand.NewSource(time.Now().UnixNano())) // First create some jitter bounded by the min and max durations. jitter := time.Duration(rnd.Float64() * float64(max-min)) if resp != nil { if v := resp.Header.Get(headerRateReset); v != "" { if reset, _ := strconv.ParseFloat(v, 64); reset > 0 { // Only update min if the given time to wait is longer. if wait := time.Duration(reset * 1e9); wait > min { min = wait } } } } return min + jitter } // configureLimiter configures the rate limiter. func (c *Client) configureLimiter() error { // Create a new request. req, err := http.NewRequest("GET", c.baseURL.String(), nil) if err != nil { return err } // Attach the default headers. for k, v := range c.headers { req.Header[k] = v } req.Header.Set("Accept", "application/vnd.api+json") // Make a single request to retrieve the rate limit headers. resp, err := c.http.HTTPClient.Do(req) if err != nil { return err } resp.Body.Close() // Set default values for when rate limiting is disabled. limit := rate.Inf burst := 0 if v := resp.Header.Get(headerRateLimit); v != "" { if rateLimit, _ := strconv.ParseFloat(v, 64); rateLimit > 0 { // Configure the limit and burst using a split of 2/3 for the limit and // 1/3 for the burst. This enables clients to burst 1/3 of the allowed // calls before the limiter kicks in. The remaining calls will then be // spread out evenly using intervals of time.Second / limit which should // prevent hitting the rate limit. limit = rate.Limit(rateLimit * 0.66) burst = int(rateLimit * 0.33) } } // Create a new limiter using the calculated values. c.limiter = rate.NewLimiter(limit, burst) return nil } // newRequest creates an API request. A relative URL path can be provided in // path, in which case it is resolved relative to the apiVersionPath of the // Client. Relative URL paths should always be specified without a preceding // slash. // If v is supplied, the value will be JSONAPI encoded and included as the // request body. If the method is GET, the value will be parsed and added as // query parameters. func (c *Client) newRequest(method, path string, v interface{}) (*retryablehttp.Request, error) { u, err := c.baseURL.Parse(path) if err != nil { return nil, err } // Create a request specific headers map. reqHeaders := make(http.Header) reqHeaders.Set("Authorization", "Bearer "+c.token) var body interface{} switch method { case "GET": reqHeaders.Set("Accept", "application/vnd.api+json") if v != nil { q, err := query.Values(v) if err != nil { return nil, err } u.RawQuery = q.Encode() } case "DELETE", "PATCH", "POST": reqHeaders.Set("Accept", "application/vnd.api+json") reqHeaders.Set("Content-Type", "application/vnd.api+json") if v != nil { buf := bytes.NewBuffer(nil) if err := jsonapi.MarshalPayloadWithoutIncluded(buf, v); err != nil { return nil, err } body = buf } case "PUT": reqHeaders.Set("Accept", "application/json") reqHeaders.Set("Content-Type", "application/octet-stream") body = v } req, err := retryablehttp.NewRequest(method, u.String(), body) if err != nil { return nil, err } // Set the default headers. for k, v := range c.headers { req.Header[k] = v } // Set the request specific headers. for k, v := range reqHeaders { req.Header[k] = v } return req, nil } // do sends an API request and returns the API response. 
The API response // is JSONAPI decoded and the document's primary data is stored in the value // pointed to by v, or returned as an error if an API error has occurred. // If v implements the io.Writer interface, the raw response body will be // written to v, without attempting to first decode it. // // The provided ctx must be non-nil. If it is canceled or times out, ctx.Err() // will be returned. func (c *Client) do(ctx context.Context, req *retryablehttp.Request, v interface{}) error { // Wait will block until the limiter can obtain a new token // or returns an error if the given context is canceled. if err := c.limiter.Wait(ctx); err != nil { return err } // Add the context to the request. req = req.WithContext(ctx) // Execute the request and check the response. resp, err := c.http.Do(req) if err != nil { // If we got an error, and the context has been canceled, // the context's error is probably more useful. select { case <-ctx.Done(): return ctx.Err() default: return err } } defer resp.Body.Close() // Basic response checking. if err := checkResponseCode(resp); err != nil { return err } // Return here if decoding the response isn't needed. if v == nil { return nil } // If v implements io.Writer, write the raw response body. if w, ok := v.(io.Writer); ok { _, err = io.Copy(w, resp.Body) return err } // Get the value of v so we can test if it's a struct. dst := reflect.Indirect(reflect.ValueOf(v)) // Return an error if v is not a struct or an io.Writer. if dst.Kind() != reflect.Struct { return fmt.Errorf("v must be a struct or an io.Writer") } // Try to get the Items and Pagination struct fields. items := dst.FieldByName("Items") pagination := dst.FieldByName("Pagination") // Unmarshal a single value if v does not contain the // Items and Pagination struct fields. if !items.IsValid() || !pagination.IsValid() { return jsonapi.UnmarshalPayload(resp.Body, v) } // Return an error if v.Items is not a slice. if items.Type().Kind() != reflect.Slice { return fmt.Errorf("v.Items must be a slice") } // Create a temporary buffer and copy all the read data into it. body := bytes.NewBuffer(nil) reader := io.TeeReader(resp.Body, body) // Unmarshal as a list of values as v.Items is a slice. raw, err := jsonapi.UnmarshalManyPayload(reader, items.Type().Elem()) if err != nil { return err } // Make a new slice to hold the results. sliceType := reflect.SliceOf(items.Type().Elem()) result := reflect.MakeSlice(sliceType, 0, len(raw)) // Add all of the results to the new slice. for _, v := range raw { result = reflect.Append(result, reflect.ValueOf(v)) } // Pointer-swap the result. items.Set(result) // As we are getting a list of values, we need to decode // the pagination details out of the response body. p, err := parsePagination(body) if err != nil { return err } // Pointer-swap the decoded pagination details. pagination.Set(reflect.ValueOf(p)) return nil } // ListOptions is used to specify pagination options when making API requests. // Pagination allows breaking up large result sets into chunks, or "pages". type ListOptions struct { // The page number to request. The results vary based on the PageSize. PageNumber int `url:"page[number],omitempty"` // The number of elements returned in a single page. PageSize int `url:"page[size],omitempty"` } // Pagination is used to return the pagination details of an API request. 
type Pagination struct { CurrentPage int `json:"current-page"` PreviousPage int `json:"prev-page"` NextPage int `json:"next-page"` TotalPages int `json:"total-pages"` TotalCount int `json:"total-count"` } func parsePagination(body io.Reader) (*Pagination, error) { var raw struct { Meta struct { Pagination Pagination `json:"pagination"` } `json:"meta"` } // JSON decode the raw response. if err := json.NewDecoder(body).Decode(&raw); err != nil { return &Pagination{}, err } return &raw.Meta.Pagination, nil } // checkResponseCode can be used to check the status code of an HTTP request. func checkResponseCode(r *http.Response) error { if r.StatusCode >= 200 && r.StatusCode <= 299 { return nil } switch r.StatusCode { case 401: return ErrUnauthorized case 404: return ErrResourceNotFound case 409: switch { case strings.HasSuffix(r.Request.URL.Path, "actions/lock"): return ErrWorkspaceLocked case strings.HasSuffix(r.Request.URL.Path, "actions/unlock"): return ErrWorkspaceNotLocked case strings.HasSuffix(r.Request.URL.Path, "actions/force-unlock"): return ErrWorkspaceNotLocked } } // Decode the error payload. errPayload := &jsonapi.ErrorsPayload{} err := json.NewDecoder(r.Body).Decode(errPayload) if err != nil || len(errPayload.Errors) == 0 { return fmt.Errorf(r.Status) } // Parse and format the errors. var errs []string for _, e := range errPayload.Errors { if e.Detail == "" { errs = append(errs, e.Title) } else { errs = append(errs, fmt.Sprintf("%s\n\n%s", e.Title, e.Detail)) } } return fmt.Errorf(strings.Join(errs, "\n")) }
The code is a Go client package for the Terraform Enterprise API. It builds a configurable API client that handles bearer-token authentication, rate limiting, and automatic retries, constructs requests, and decodes JSONAPI responses, including paginated list results. The package also defines default constants and sentinel error values, and relies on external libraries for pooled and retryable HTTP clients, query-string encoding, JSONAPI (de)serialization, and client-side rate limiting.
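A minimal usage sketch of this client follows; it assumes the package is imported from its usual go-tfe path, the organization and workspace names are placeholders, and the Workspaces.Read call and the Workspace ID field (implemented elsewhere in the package) are assumed to follow the usual (ctx, organization, workspace) form. Only Config and NewClient are taken directly from the code above.

package main

import (
	"context"
	"log"

	tfe "github.com/hashicorp/go-tfe"
)

func main() {
	// Token and Address fall back to the TFE_TOKEN and TFE_ADDRESS environment
	// variables when left empty (see DefaultConfig above).
	cfg := &tfe.Config{
		Token: "insert-your-api-token", // placeholder
	}

	// NewClient validates the address, requires a token, and wires up the
	// retrying HTTP client and the rate limiter before creating the services.
	client, err := tfe.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Every call takes a context so requests can be cancelled or time out.
	ctx := context.Background()

	// Assumed signature: Read(ctx, organization, workspace) (*Workspace, error).
	ws, err := client.Workspaces.Read(ctx, "my-org", "my-workspace")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("workspace ID:", ws.ID)
}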
Write a title and summarize: Protein function is encoded within protein sequence and protein domains. However, how protein domains cooperate within a protein to modulate overall activity and how this impacts functional diversification at the molecular and organism levels remains largely unaddressed. Focusing on three domains of the central class Drosophila Hox transcription factor AbdominalA (AbdA), we used combinatorial domain mutations and most known AbdA developmental functions as biological readouts to investigate how protein domains collectively shape protein activity. The results uncover redundancy, interactivity, and multifunctionality of protein domains as salient features underlying overall AbdA protein activity, providing means to apprehend functional diversity and accounting for the robustness of Hox-controlled developmental programs. Importantly, the results highlight context-dependency in protein domain usage and interaction, allowing major modifications in domains to be tolerated without general functional loss. The non-pleoitropic effect of domain mutation suggests that protein modification may contribute more broadly to molecular changes underlying morphological diversification during evolution, so far thought to rely largely on modification in gene cis-regulatory sequences. How the diversity of animal body plans is established remains a central question in developmental and evolutionary biology [1], [2]. A key step towards understanding the molecular basis underlying diversity is to decipher mechanisms controlling proper genome expression, and how variations in these mechanisms have been at the origin of developmental and evolutionary diversity. While a large number of studies have focused on the impact of cis-regulatory sequences organization (reviewed in [3]), deciphering the intrinsic functional organization of trans-acting transcription factors remains largely unaddressed. Studies have identified functional domains ([4]–[9] and [7], [10], [11] for reviews), but how different protein domains jointly and collectively act for defining the overall activity has been poorly assessed. Yet, a recent study highlights that the synthetic shuffling of protein domains within proteins of the yeast-mating signaling pathway results in the diversification of the mating behavior, demonstrating the importance of protein domain interactions for functional diversification [12]. Hox genes, which encode homeodomain (HD) -containing transcription factors, provide a suitable paradigm to decipher how function is encoded within protein sequence, and how associated changes may constitute the origin of functional specification and diversification. Hox genes have arisen from duplication events of ancestral genes, followed by sequence divergence that promoted the emergence of up to 14 paralogous groups in vertebrates. Hox paralogue proteins display distinct regulatory functions, promoting axial morphological diversification in all bilaterian animals [13]–[17]. Previous work has established that sequence changes in the HD, the DNA binding domain, and a few additional protein domains, have played a major role in the diversification of Hox protein function [4]–[9], [18]–[21]. However, how protein domains functionally interact to shape overall protein activity remains elusive. We focused on three protein domains from the Drosophila central Hox paralogue protein Abdominal (AbdA, Figure 1). 
These domains are related by their demonstrated or potential involvement in the recruitment of the Extradenticle (Exd) cofactor, homologous to vertebrate PBX proteins, known to have key roles in establishing Hox functional specificity. The first domain, known as hexapeptide (HX) or PID (Pbx Interacting Domain), with a core YPWM sequence, is found in all Hox paralogue groups, with the exception of some posterior Hox proteins. Biochemical, structural and functional studies have shown that this motif mediates interaction with the Exd/PBX class of Hox cofactors (collectively referred as PBC). The second domain, termed UbdA (UA) is specifically found in the central Hox proteins AbdA and Ultrabithorax (Ubx). This paralogue-specific domain was recently shown to be required for Exd recruitment in the repression of the limb-promoting gene Distalless (Dll) [8], [22]. The third domain (TD), similar in sequence (TDWM) to the YPWM motif, is also paralogue-specific. The TD motif retains the W that provides strong contact with the PBC class proteins, and matches the sequence of the HX motif in some Hox proteins (eg., Hoxa1). Evidence for an Exd recruiting role of the TD domain in AbdA however remains to be demonstrated. To start unraveling how protein domains collectively shape Hox protein activity, the effect of single, combined double or triple domain mutations were analyzed using most known AbdA functions as biological readouts. The large functional window covered by the study allows identifying functional attributes of protein domains taken in isolation and collectively, and a quantitative analysis by hierarchical clustering highlights the functional organization of the Hox protein AbdA. Given the phylogeny of the studied protein domains, the work has also implication regarding the mechanisms underlying the evolution of AbdA protein function. AbdA variants bearing single or all possible combinations of protein domain mutations (Figure 1A) were ectopically expressed through the binary UAS-Gal4 expression system [23]. Protein levels following induced expression were quantified and experimental conditions ensuring levels close to that of endogenous AbdA were selected (see Materials and Methods). Impact of AbdA variants on target gene control, phenotypic traits and locomotion behavior (Figure 1B), covering AbdA functions of increasing complexity in different tissues, were evaluated in the anterior region where the endogenous AbdA protein is absent. Quantified results (see Text S1) are presented as loss (and in few cases as gain) of regulatory potential. Eleven functional assays were used to assess domain requirements for AbdA activity (Figure 1B). Four assays rely on the regulation of AbdA target genes, for which evidence of a direct regulation has been previously reported, including the regulation of Distalless (Dll) [8], [24], [25] and Antennapedia (Antp) [26] in the epidermis, and the regulation of wingless (wg) [27] and decapentaplegic (dpp) [28], [29] in the visceral mesoderm. Six assays rely on analysis of phenotypic traits. One of these phenotypic trait, oenocyte specification, results from the regulation of a single target gene [30]. Others, cerebral branch [31], somatic muscles [32], A2 epidermal morphology [33], [34], neuroblast [35], [36] and heart cell lineage specification [37] likely depend of the coordinated regulation of several target genes. 
Finally, we also used a behavioral trait, larval locomotion, thought to rely on integrated AbdA function in two distinct tissues, the somatic musculature and the nervous system [38]. In the somatic musculature, the abdominal-specific pattern is characterized by the presence of a ventrally located muscle that is absent in thoracic segments, a feature that can be visualized by the expression of nautilus (nau) [32]. This distinction was previously shown to result, at least in part, from the activity of AbdA [32]. Accordingly, anterior ectopic expression of AbdA using the mesodermal driver (24B-Gal4) results in ectopic ventral expression of Nau in anterior segments (Figure 2). We found, however, that none of the AbdA protein domains under study, alone or in combination, was required to specify the abdominal-specific features of the somatic musculature (Figure 2 and Figure S1). Under the same conditions, a point mutation at position 50 of the homeodomain that impairs AbdA binding to DNA resulted in the loss of Nau-inducing capacity (Figure 2 and Figure S1). The dispensability of the HX, TD and UA domains for specifying abdominal features of the somatic muscle pattern is consistent with the fact that nau activation by AbdA is not dependent upon Exd activity [32], although results below argue that these domains assume functions other than Exd recruitment.

In the embryonic central nervous system, a subset of 30 neuroblasts (NBs) found in each hemisegment, including NB5–6, generates a larger lineage in the thorax than in the abdomen. Recent studies demonstrated that posterior Hox genes, such as abdA, impose in the abdomen a smaller NB5–6 lineage by triggering an early cell cycle exit [39]. Misexpression of AbdA within NB5–6 in the thorax using ladybird (K)-Gal4 results in an early lineage truncation, mimicking the situation that normally occurs in the abdomen, ultimately leading to a smaller thoracic NB5–6 lineage size (Figure 3). The average number of NB5–6 cells in wild-type thoracic and abdominal segments was previously estimated at 16 and 6 cells, respectively; these values were considered as references for full (100%) or complete loss (0%) of repressive activities of AbdA variants on the NB5–6 lineage. Intermediate repressive levels upon ectopic expression with ladybird (K)-Gal4 were deduced from the quantification of NB5–6 lineage cell numbers in thoracic segments T2/3 (see methods). Results obtained indicate that lineage truncation triggered by AbdA is similarly affected following UA, HX/UA, TD/UA and HX/TD/UA mutations (Figure 3), which is best explained by a unique requirement of the UA domain for AbdA function.

In the visceral mesoderm, AbdA is expressed in parasegments (PS) 8–12. The target genes wg and dpp are respectively activated (in PS8) and repressed (in PS8–12) by AbdA in the visceral mesoderm. Restricted (PS8) activation of wg by AbdA results from the action of the Dpp signal, locally produced by PS7 cells under the control of the Ubx protein [40]. Accordingly, anterior ectopic expression of AbdA only results in a mild activation of wg, as activation only occurs in cells experiencing partial repression of dpp [27]. Previous work has shown that the HX mutation results in a protein that activates dpp instead of repressing it, and consequently more efficiently activates wg [41]. AbdA variants were expressed with the 24B-Gal4 driver.
Levels of regulatory activities were deduced following fluorescent in situ hybridization against dpp or wg in the visceral mesoderm of stage 14 embryos in PS1–PS7, ie anterior to endogenous AbdA expressing cells (PS8–12; Figure 4 and Figures S2 and S3). Arbitrary values have been assigned to regulatory activities of AbdA variants. For dpp (Figure 4A and Figure S2), no effect on dpp expression was scored by 0, normal repression of dpp expression in PS7 by 100 (partial repression was never observed) and ectopic activation (instead of repression) of dpp was scored by negative values (depending of the number of ectopic sites (see Text S1). For wg, in a manner similar to dpp, no effect was scored by 0, and positive and negative values were respectively assigned to normal (activation) or abnormal (repression) activities on wg expression (Figure 4B and Figure S3; see Text S1). Results obtained allow two conclusions. First, single domain mutations result in strong modification of AbdA activity. Second, domain mutations often result not only in a quantitative, but also in a qualitative (neomorphic) modification of activity, changing AbdA from an activator to a repressor, or reversely from a repressor to an activator. Oenocytes form under AbdA control in segments A1–A7. This occurs through AbdA-dependent activation of Rhomboid (Rho) in a chordotonal organ precursor cell called C1. Expression of Rho then enables the secretion of the EGF ligand Spitz that will instruct neighboring epidermal cells to differentiate into oenocytes [30]. In absence of AbdA, the EGF pathway is not locally activated and oenocytes are not specified [30]. Reversely, ectopic expression of AbdA induces oenocytes in thoracic segments. AbdA variants were ubiquitously expressed with the armadillo (arm) -Gal4 driver. Oenocyte inducing potential of AbdA variants, visualised with the seven-up (svp) -lacZ enhancer trap reporter construct, was deduced from the number of thoracic segments that contain ectopic oenocytes (see Text S1). This inductive potential is reduced following single mutations of the UA domain and combined mutation of the HX/TD or TD/UA domains, and is abolished following HX/UA and HX/TD/UA mutations (Figure 5 and Figure S4). These observations suggest an additive contribution of the HX, TD and UA protein domains for oenocyte induction by AbdA, consistent with protein domains acting independently of each other, and contributing uniquely through additive contribution to protein activity. The tracheal cerebral branch forms dorsally exclusively in the second thoracic segment T2, in response to repressive activities of Bithorax Hox proteins in T3-A8 segments [42]. This phenotypic trait can be followed by a breathless (btl) driven GFP reporter that extends posteriorly in the absence of Bithorax complex genes, and that is suppressed in T2 following Btl-driven expression of AbdA in the tracheal system (Figure 6A). Only full repression of cerebral branches was considered and repressive activities of AbdA variants thus correspond to either 0% (no repression) or 100% (full repression) (see Text S1). We found that the repression of the cerebral branch by AbdA is impaired following TD/UA and HX/TD/UA but not HX/UA or HX/TD mutations, revealing a functional redundancy between the TD and UA domains (Figure 6A, and Figure S5). In the embryonic heart, abdominal segments are made of six pairs of cells, instead of four in thoracic segments [37]. 
This difference was shown to result from AbdA (and Ubx) promoting the six-cell lineage in the abdomen [37], and in the thorax following AbdA ubiquitous expression in the mesoderm driven by the 24B-Gal4 driver ([37], Figure 6B). The visualization of the lineage is facilitated by Dorsocross (Doc) staining, which labels two cells in each hemisegment and allows each hemisegment to be unambiguously identified. Effects of AbdA variants on cardiac cell specification were visualized by double fluorescent immunostaining against AbdA and Dorsocross (Doc). The six-cell lineage inductive capacity of AbdA was scored by counting the number of cardiac cells in the T2 and T3 segments (see Text S1). Results showed that the six-cell lineage inductive ability of AbdA is lost following HX/UA and HX/TD/UA mutations (Figure 6B and Figure S6). These observations again highlight functional redundancy, but between the UA and HX domains, instead of the TD and UA domains as observed in cerebral branch specification.

Additional examples of functional redundancy, yet in more complex patterns of interactions between protein domains, were found in the biological contexts described below. The limb-promoting gene Distalless (Dll) and the Hox gene Antennapedia (Antp) are direct targets of AbdA [26], [43]. The ability of AbdA variants, following ubiquitous expression through the arm-Gal4 driver, to repress Dll (Figure 7A and Figure S7) and Antp (Figure 7B and Figure S8) was evaluated by examining the activity of a Hox-responsive Dll enhancer (DME, [44]) and the expression of the Antp protein, respectively (see Text S1). Single domain mutations do not strongly affect the repressive activities of AbdA on Dll and Antp, leading to a mean loss of 40%, with the exception of the TD mutation, which more strongly affects (60%) the repressive activity on Antp. Combining domain mutations leads to stronger effects: in the case of Dll, simultaneous mutation of the HX and UA domains almost completely abolishes AbdA repressive activities, while in the case of Antp simultaneous mutation of the HX and UA domains or the TD and UA domains results in a loss of 70% of AbdA repressive activity. More surprisingly, simultaneous mutation of the HX, TD and UA domains does not further compromise AbdA activity but instead restores a significant level of repressive activity, comparable to that of AbdA variants mutated in a single domain. This indicates that the three protein domains do not provide independent regulatory input, but likely act in interactive and mutually inhibitory ways.

A similar yet more complex pattern of domain interactions was observed in the specification of A2 epidermal morphology. In this tissue, AbdA promotes the formation of a stereotyped trapezoidal arrangement of denticle belts (Figure 7C). The potential of AbdA variants to specify A2 epidermal morphology was assessed following arm-Gal4 driven expression by scoring denticle belt morphology and organisation in transformed A1 and thoracic segments (Figure 7C and Figure S9). Epidermal specification was not impaired by the HX mutation and was slightly reduced by UA or TD mutations. Simultaneous mutation of two domains suggests functional redundancy between the HX and TD domains and between the UA and HX domains, but not between the UA and TD domains. As noticed previously for the regulation of Dll and Antp in the epidermis, mutating the three domains simultaneously restores the activity, generating a protein that displays an activity close to that of the wild-type protein.
In many animals including vertebrates, locomotion results from the coordinated action of regionally distinct sets of movements. Drosophila larvae crawl by means of three region specific movements [38]. The locomotion cycle starts by a contraction of the most posterior abdominal segments (A8/A9), followed by a wave of peristaltic movement in A1–A7, where each segment is transiently lifted up (D/V movement), pulled forward and lowered, starting from A7. When the wave reaches A1, the thoracic and head segments start moving by a telescopic type of movement (A/P movement), occurring through contraction of anterior segments [38]. It was established that AbdA is necessary and sufficient to specify the abdominal type of movement, namely abdominal peristalsis [38]. The potential of wild type and AbdA variants to promote abdominal peristalsis was evaluated following arm-Gal4 driven expression (Figure 7B), by scoring in the T3 thoracic segment D/V movements (see Text S1). Single domain mutations do not significantly alter promotion of abdominal peristalsis (Figure 7D and Figure S10). Again, two types of functional redundancy were observed: between the TD and UA domains, and to a lesser extent between the HX and UA domains. As in the case of Dll and Antp regulation and A2 epidermal morphology specification, triple domain mutation corrected the effects of double mutations, with a protein promoting abdominal peristalsis as efficiently as the wild type protein, providing an additional example of mutually suppressive activity of protein domains. Previous studies have established that Exd is required for Dll [25] and wg [45] regulation, oenocytes [30] and epidermal morphology specification [46], and neuroblast lineage commitment [37], while dispensable for Antp [46] and dpp [47] regulation. In the case of cerebral branch specification, no conclusion could be reached since loss of Exd results in the absence of cerebral branch formation in the T2 segment [48]: this positive input of Exd hinders the assessment of a possible contribution for AbdA mediated cerebral branch repression in abdominal segments. The potential implication of Exd in AbdA-mediated heart lineage commitment and larval locomotion is not known. Staining for Doc1 in embryos deprived for maternal and zygotic Exd showed that the abdominal hemi segments adopt the AbdA-dependent six cell lineage, showing the dispensability of Exd for this AbdA function (Figure 6C). The requirement of Exd for larval locomotion has been examined in homothorax (hth) mutant that impairs Exd nuclear transport and mimics exd maternal and zygotic loss [49]. The absence of peristaltic waves in this genetic context indicates a strict requirement of Exd for abdominal peristalsis (Figure S10). Taken together with the protein domain requirement results, the exd dependency indicates that the HX, UA and TD domains, known (HX and UA) or candidate (TD) Exd recruiting domains, are also required for Exd-independent function. This is supported by the HX/UA requirement for heart lineage specification, by the HX and UA requirement for proper regulation of the dpp target gene, the HX/TD requirement for Antp repression and the requirement of TD for dpp target regulation. Collectively, this highlights that the HX and UA (and likely TD) protein domains are multifunctional, serving in some biological context Exd interaction function, while in others, they are used differently, for a molecular activity that still remains to be defined. 
The complete set of quantitative data was analyzed using a hierarchical clustering method (Figure 8; see Materials and Methods). Clustering according to biological readouts does not reveal any clear grouping, regarding for instance developmental stage or tissue type, suggesting that the forces that govern domain usage and interaction between protein domains mostly reside in the regulated target gene. By contrast, clustering according to protein domains clearly reveals a hierarchical requirement of the domains for the various AbdA functions analyzed here. A bipartition of AbdA variants is observed, with the mutants for the HX, the TD and HX/TD domains on the one hand, and variants mutant for the UA domain, alone or in combination, on the other hand. Such bipartition suggests the existence of two functional modules that can be distinguished based on UA domain requirement. The first module, which relies mostly on the HX and TD domains, is used for a small subset of AbdA functions only. The second module relies on the activity of the HX, TD and UA domains, yet the requirements of the HX and TD domains are revealed only in UA deficient context. Thus, the driving force in this second functional module is the UA domain, as its mutation unmasks the requirement for the HX and TD domains, which is not revealed by their single or combined mutations. These results identify a prominent role of the UA domain in AbdA function. Studies towards deciphering the mode of action of Hox proteins have so far essentially concentrated on how individual protein domains contribute to protein function. These focused approaches allowed in depth analyses, unraveling the intimate molecular and sometimes structural details of how protein domains contribute to protein function, providing decisive insights into how Hox proteins reach specificity. This work provides a different complementary approach towards deciphering the mode of action of Hox proteins. First it aims at studying protein domains in combinations, using combined and not only single protein domain mutations, considering that the overall protein activity is likely not a sum of the activity of individual protein domains, and that novel properties may emerge from interactions between protein domains. Second, it uses extensive in vivo biological readout, (most of the known AbdA functions), instead of a single or a few functions. While impairing the in depth analyses of previous focused approaches, the large functional window covered by this study allows the identification of features underlying the intrinsic functional organization of the Hox protein AbdA. Although the approach taken relies on a gain of function strategy, special care was taken to select experimental conditions where proteins were expressed closed to physiological levels of expression. Biological readouts considered are functions that AbdA can sustain in ectopic places, suggesting that availability of AbdA protein partners is not a limitation of the experimental strategy chosen. Finally, the effects of expressing the AbdA variants (in all eleven biological readouts) were scored in regions anterior to the endogenous AbdA expression domain (ie in cells where the endogenous wild type gene product is not present), avoiding any further complexity that may result from competition with the endogenous AbdA protein. Below, we summarize how results obtained shed light on the mode of action of the Hox protein AbdA and discuss the evolutionary implications. 
This study identifies salient features underlying the intrinsic functional organization of the AbdA Hox transcription factor. Protein domains often display functional redundancy, with strong effects in most cases requiring simultaneous mutations of two or three domains. Redundancy was frequently observed between the HX and UA domains, or between the TD and UA domains, while redundancy between the HX and TD domains is less frequent (Figure 8). This indicates that redundancy does not necessarily rely on functional compensation through structurally related domains, since the HX and TD are closely related domains, while the UA domain is completely unrelated. Thus, functional redundancy rather reflects the potential to perform similar activities through distinct molecular strategies. This property likely confers robustness to Hox protein activity, accommodating mutations in protein domains without generally impacting on regulatory activities. Protein domains within AbdA also generally do not act as independent functional modules, but instead display a high degree of interactivity, as demonstrated by the non-additive effects of domain mutations in the majority of the biological readouts studied. In addition, protein domains are often multifunctional, in the sense that they serve different molecular functions. This is illustrated by the fact that the HX and UA domains, previously described to mediate Exd recruitment, are also required for Exd-independent processes. Thus domain interactivity and multifunctionality are hallmarks of AbdA regulatory activity. These properties provide means to apprehend the bases underlying Hox functional diversity with a restricted number of functional modules, and therefore may account for the variety of Hox-controlled biological functions. Protein domain usage and interaction between protein domains in AbdA strongly depends on the biological readout, suggesting that domain usage largely depends on the regulated target gene, and hence on the identity of the gene cis regulatory sequences. Recent reports support that DNA sequences impact on Hox protein activity: Hox binding site neighboring sequences are important for proper regulation of the reaper downstream target [50]; Sex combs reduced changes its conformation and activity depending on the cognate sequence [51]. Of note, a role for the target sequence in controlling the structure and activity of the glucocorticoid receptor has also been recently reported [52], indicating that this may generally apply for many DNA binding transcription factors. Our results also have implication on how modifications in protein sequences are translated into changes in protein function during evolution. The HX domain, common to all Hox proteins, is ancient and found in all bilaterians, and provides a generic mode of PBC interaction (Figure 9). The UA domain, specific to some central Hox proteins (AbdA and Ubx in Drosophila), was acquired later, at the time of protostome/deuterostome radiation. It provides a distinct yet to be characterised PBC interaction mode, specific to some Hox paralogues only, allowing fine-tuning of Hox protein activity [22]. TD is found only in insect AbdA and not in Ubx proteins, suggesting that it arose after the duplication that generated Ubx and AbdA in the common ancestor of insects (Figure 9). Remarkably, within AbdA arthropod proteins, the HX domain has significantly diverged in some lineages like anopheles, while the TD domain has been strictly conserved. 
Conceptually, two non-exclusive models could account for the evolution of protein function following the acquisition of a novel protein domain. In the first one, the acquisition provides a novel molecular and functional property, which adds to pre-existing ones. This is for example the case for the acquisition of the QA domain that confers repressive function to Ubx [6], and the acquisition/loss of HX or LRALLT domains by Fushi tarazu (Ftz) from distinct insect species, which provides Ftz with the capacity to recruit either Exd or FtzF1 cofactors and switches its activity from a Hox to a segmentation protein [53]. In the second model, the acquisition of a novel protein domain interferes with the activity of pre-existing domains, reorganizing the intrinsic functional organization of the protein. This view is supported by the predominant role of the UA domain and the widespread domain interactivity seen in this study.

Evolutionary changes in animal morphology are thought to mostly rely on changes in cis-regulatory sequences [1]. This is conceptually supported by the modular organization of cis-regulatory sequences, allowing subtle and cell-specific changes in gene expression that are not deleterious for the animal. Experimentally, it is largely supported by the correlation between expression of key developmental regulatory genes and morphological changes (for example see [54]), and by changes in cis-regulatory sequences that impact morphological traits [55]–[59]. Changes in animal morphology could also result from changes in protein sequence and function, as shown for Hox proteins in the morphological diversification of arthropods [4], [6]. However, changes in protein function are not believed to broadly contribute to morphological diversification during animal evolution, based on the assumption that changes in protein sequences are expected to have pleiotropic effects, which, as such, do not provide a means to convey subtle and viable evolutionary changes. Our work identifies redundancy and selectivity in protein domain usage as salient features of the intrinsic regulatory logic of the AbdA transcription factor: even the HX domain, evolutionarily conserved in all Hox proteins, is essential for only one AbdA function, and often acts in a redundant way with the TD or the UA protein domains. Selective use of protein domains is also supported by findings of a few smaller scale studies of three other Drosophila Hox proteins: viable missense or small deletion mutations within the Scr protein coding sequences fall into different allelic series when examined for three distinct biological readouts [60]; deletion of C-terminal sequences of the Ubx protein, starting from an insect-specific QA protein domain, preferentially affects a subset of Ubx functions [61]; and dispensability of the HX was reported for the leg-inducing capability of the Antp Hox protein, whereas it is required for other Antp functions [62]. This context-dependent selective mode of protein domain usage, or differential pleiotropy, may be essential for the evolution of Hox protein functions, as it ensures developmental robustness of a Hox-controlled program while being permissive to evolutionary changes endowing novel functions to preexisting protein domains. In addition, our work also establishes that interactivity between protein domains is highly context dependent, suggesting that Hox protein function relies not only on a selective mode of protein domain usage but also on a selective mode of protein domain interactivity.
Altogether, these observations challenge the view that changes in protein sequences necessarily have pleiotropic effects, giving more room for protein changes in the evolution of animal body plans.

24B-Gal4 and arm-Gal4 were used as embryonic mesodermal and ubiquitous drivers, respectively. Btl-Gal4 and lbe(K)-Gal4 were used for specific expression in the tracheal system and NB5–6 neuroblasts, respectively. The DME-lacZ and svp-lacZ lines are respectively from R. Mann (Columbia Univ., NY, USA) and S. Zaffran (IBDML, Marseille, France). exdXP11 and hthP2 alleles were used. Embryo collections, cuticle preparations, in situ hybridizations, and immunodetections were performed according to standard procedures. Digoxigenin RNA-labelled probes were generated according to the manufacturer's protocol (Boehringer Mannheim, Gaithersburg, MD) from wg and dpp cDNAs cloned in Bluescript. Primary antibodies used were: anti-Antp (4C3, dilution 1/100, Developmental Studies Hybridoma Bank (DSHB)); rabbit anti-AbdA (1/1000); guinea-pig anti-Doc2+3 (1/400) and rabbit anti-Dmef2 (1/2000) from L. Perrin (IBDML, Marseille, France); rabbit anti-Exd (1/1000) from R. Mann; rabbit anti-Nau (1/100) from BM Paterson (University of Texas Southwestern Medical Center, Dallas, TX); rabbit (1/500) or mouse (1/200) anti-GFP from Molecular Probes; chicken anti-GFP (1/1000) from Aves Labs; mouse anti-β-galactosidase (1/1000) from Promega; rabbit anti-β-galactosidase (1/1000) from MP Biomedical; and anti-digoxigenin coupled to biotin (1/500) from Jackson. Secondary antibodies coupled to Alexa 488, Alexa 555 (Molecular Probes) or to biotin (Jackson) were used at a 1/500 dilution.

AbdA variants were generated by PCR. Domain mutations were YPWM→AAAA; TDWM→AVAI; KEINE→KAAAA. The homeodomain point mutation impairing DNA binding is a mutation of position 50 (Q→K; [47]). Constructs were cloned in pUAST or pUASTattB vectors for transgenic line establishment. Lines were crossed with the appropriate driver, and collected embryos were stained with anti-AbdA to select the conditions (line and temperature) that result in expression levels similar (+/−15%) to wild-type AbdA levels in A2 (see [22] for a detailed description of the procedure).

Procedures used for quantification of biological readouts, using at least 10 embryos of each genotype, are provided in Text S1. A matrix containing the values corresponding to the readouts was built. The extreme values were assigned to total loss of activity (value 0) and to wild-type activity (value 1, for 100% of activity). A hierarchical clustering algorithm (with Euclidean distance and average linkage) was applied to the matrix using the MeV software suite [63]. The jackknife method was used to resample the data and provide statistical support for each tree node. Boxplots were drawn using the R software. Each boxplot depicts the value distribution obtained for each tested genotype. Black points correspond to individual counts.
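To make the clustering step above concrete, the sketch below implements average-linkage hierarchical clustering with Euclidean distance in Go. The readout matrix, the variant names and the three readout columns are illustrative placeholders rather than the published values; the actual analysis was performed with the MeV software suite and jackknife resampling.

package main

import (
	"fmt"
	"math"
)

// euclidean returns the Euclidean distance between two readout vectors.
func euclidean(a, b []float64) float64 {
	var s float64
	for i := range a {
		d := a[i] - b[i]
		s += d * d
	}
	return math.Sqrt(s)
}

// averageLink returns the mean pairwise distance between two clusters,
// each given as a list of row indices into the readout matrix.
func averageLink(m [][]float64, c1, c2 []int) float64 {
	var sum float64
	for _, i := range c1 {
		for _, j := range c2 {
			sum += euclidean(m[i], m[j])
		}
	}
	return sum / float64(len(c1)*len(c2))
}

func main() {
	// Hypothetical readout matrix: one row per AbdA variant, one column per
	// biological readout, scaled between 0 (total loss) and 1 (wild-type activity).
	variants := []string{"WT", "HX", "TD", "UA", "HX/UA", "HX/TD/UA"}
	matrix := [][]float64{
		{1.0, 1.0, 1.0},
		{1.0, 0.9, 0.6},
		{0.9, 0.8, 0.5},
		{0.5, 0.6, 0.3},
		{0.1, 0.2, 0.1},
		{0.8, 0.9, 0.7},
	}

	// Start with one singleton cluster per variant.
	clusters := make([][]int, len(variants))
	names := make([]string, len(variants))
	for i := range variants {
		clusters[i] = []int{i}
		names[i] = variants[i]
	}

	// Repeatedly merge the two closest clusters (average linkage) until one
	// remains; the merge order corresponds to the structure of the tree.
	for len(clusters) > 1 {
		bi, bj, best := 0, 1, math.Inf(1)
		for i := 0; i < len(clusters); i++ {
			for j := i + 1; j < len(clusters); j++ {
				if d := averageLink(matrix, clusters[i], clusters[j]); d < best {
					bi, bj, best = i, j, d
				}
			}
		}
		fmt.Printf("merge %s + %s at distance %.3f\n", names[bi], names[bj], best)
		clusters[bi] = append(clusters[bi], clusters[bj]...)
		names[bi] = "(" + names[bi] + "," + names[bj] + ")"
		clusters = append(clusters[:bj], clusters[bj+1:]...)
		names = append(names[:bj], names[bj+1:]...)
	}
}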
Proteins perform essential regulatory functions, including control of gene transcription, a process central to development, evolution, and disease. While protein domains important for protein activity have been identified, how they act together to define the activity of a protein remains poorly explored. The predominant view, influenced by prokaryotic transcription factors, is that protein domains constitute independent functional modules, required for all aspects of protein activity. In this study, we used Hox proteins, evolutionarily conserved transcription factors playing key roles in the establishment of animal body plans, to examine how protein domains collectively shape protein activity. Results obtained using a broad range of biological readouts highlight context-dependency in protein domain usage and interaction, revealing that protein domains are non-pleiotropic in nature. This suggests that protein modification may contribute more broadly to molecular changes underlying morphological diversity, so far thought to rely largely on modification of gene cis-regulatory sequences.
Write a title and summarize: Genome-scale metabolic models are available for an increasing number of organisms and can be used to define the region of feasible metabolic flux distributions. In this work we use as constraints a small set of experimental metabolic fluxes, which reduces the region of feasible metabolic states. Once the region of feasible flux distributions has been defined, a set of possible flux distributions is obtained by random sampling and the averages and standard deviations for each of the metabolic fluxes in the genome-scale model are calculated. These values allow estimation of the significance of change for each reaction rate between different conditions and its comparison with the significance of change in gene transcription for the corresponding enzymes. The comparison of flux change and gene expression allows identification of enzymes showing a significant correlation between flux change and expression change (transcriptional regulation) as well as reactions whose flux change is likely to be driven only by changes in the metabolite concentrations (metabolic regulation). The changes due to growth on four different carbon sources and as a consequence of five gene deletions were analyzed for Saccharomyces cerevisiae. The enzymes with transcriptional regulation showed enrichment for certain transcription factors, which has not been previously reported. The information provided by the presented method could guide the discovery of new metabolic engineering strategies or the identification of drug targets for treatment of metabolic diseases.

Systems Biology aims to use mathematical models to integrate different kinds of data in order to achieve a global understanding of cellular functions. The data to be integrated differ both in their nature and measurability. The availability of DNA microarrays allows for the comparative analysis of mRNA levels between different strains and conditions. These data provide genome-wide information, and changes in expression under different conditions are expressed in statistical terms such as p-values or Z-scores that quantify the level of significance in transcriptional changes. The availability of annotated genome-scale metabolic networks allowed mapping of the transcriptional changes in metabolic genes onto their corresponding metabolic pathways and defining significantly up- or down-regulated sub-networks [1]. Even though this allows for identification of transcriptional hot-spots in metabolism, it still does not provide information about whether there are any changes in metabolic fluxes in these pathways, as it has been shown that in general there is no clear correlation between gene expression and protein concentration [2] or metabolic flux [3], [4]. Metabolic fluxes are the result of a complex interplay between enzyme kinetics, metabolite concentrations, gene expression and translational regulation. Metabolic fluxes can be directly measured using 13C labeling experiments [5]. However, flux data obtained using this approach differ from gene expression data in two main features: 1) their determination is only possible for a relatively small subset of all the reactions in a genome-scale metabolic network and 2) they are indirect data in the sense that the fluxes are quantified by fitting measured labeling patterns using a simple metabolic model. The complexity of the mRNA-flux dependence and the disparity in the nature of both kinds of data make their integration an important challenge.
In this paper we propose a method to integrate gene expression data with flux data by transforming a limited amount of quantitative flux data into a genome-scale set of statistical scores similar to the one obtained from DNA microarrays. In order to do that, a set of experimental exchange fluxes are fixed for each of the studied conditions or for each of the strains investigated, and a sampling algorithm is then used to obtain a set of flux distributions satisfying the experimental values. This approach allows for obtaining means and standard deviations for each flux in the genome-scale network. From the mean and standard deviation it is possible to derive statistical scores for the significance of flux change between conditions [6], [7]. Random sampling in the region of feasible flux distributions has been previously used to study the statistical distribution of flux values and determine a flux backbone of reactions carrying high fluxes [8] as well as to define modules of reactions whose fluxes are positively correlated [9], [10]. Also mitochondria related diseases have been analyzed using random sampling [11]. All the works published so far used the Hit and Run algorithm to perform the sampling [7]. By dividing the average difference among two conditions (e. g. carbon sources or mutant strains) by its standard deviation, it is possible to obtain Z scores for each metabolic flux. These scores can be transformed into p-values that measure the significance of change of each flux (see methods). By comparing these p-values with the p-values derived from gene-expression arrays, the enzymes in the network can be classified as: 1) enzymes that have a significantly correlated change both in flux and expression level (reactions showing transcriptional regulation), 2) enzymes that show a significant change in expression but not in flux (we will refer to them as showing post-transcriptional regulation) and 3) enzymes that show significant changes in flux but not a change in expression (metabolic regulation). Hereby we provide a framework that allows for global classification of reaction fluxes into those that are transcriptionally regulated, post-transcriptionally regulated and metabolically regulated (see Fig. 1). This will have substantial impact on the field of metabolic engineering where changes in gene-expression are often used as the key means to alter metabolic fluxes. In the paper we show the use of the presented framework for the analysis of the yeast Saccharomyces cerevisiae grown at different growth conditions and for the analysis of different deletion mutants. The combined use of random sampling of genome-scale metabolic networks and expression data allows for global mapping of reactions that are either transcriptionally or metabolically regulated. This information can be used to guide the engineering of microbial strains or as a diagnosis tool for studying metabolic diseases in humans. In particular we should highlight that reactions in which there is no relation between gene transcription level and metabolic flux are not suitable targets for flux increase via gene over-expression. Through analysis of different data sets the method revealed that many changes in gene expression are not correlated with a corresponding change in metabolic fluxes. The use of gene-expression data alone can therefore be misleading. 
However, our method allowed for identification of many specific reactions that are indeed transcriptionally regulated, and we further identified that the expression of these enzymes is regulated by a few key transcription factors. This fact suggests that the regulation of metabolism has evolved to contain a few flux-regulating transcription factors that could be the target for genetic manipulations in order to redirect fluxes. Here we propose a sampling method that finds extreme solutions among the feasible flux distributions of the metabolic network. These solutions correspond to the corners of the region of allowed flux distributions, and in mathematical terms they are elements of the convex basis of the region of feasible solutions (which is a convex set). The COBRA Toolbox [12] includes a random sampling option that uses the Hit and Run algorithm [13] to obtain points uniformly distributed in the region of allowed solutions. The difference between the two sampling methods is illustrated in Fig. 2. In order to assess the accuracy of our sampling method in estimating the average fluxes and their standard deviations, we compared a set of internal fluxes measured with 13C labeling [14] with predictions from 500 sampling points obtained using the convex basis sampling method and 500 sampling points obtained using the sampling algorithm implemented in the COBRA Toolbox. The results are summarized in Table 1, where our method is labeled Convex Basis (CB), because it samples elements of the convex basis of the region of allowed solutions (see above), and the method from the COBRA Toolbox is labeled Hit and Run (HR). The Z values in the table are the number of standard deviations by which the real value deviates from the calculated mean. The means obtained by the two sampling methods are very similar for most of the reactions; however, the standard deviations found using the HR algorithm are significantly smaller. With the HR method the real values for the fluxes in many cases deviate several standard deviations from the mean. A high value of Z indicates that the real value has a very low chance of being obtained using the considered sampling method (or in other words: the real value does not belong to the family of solutions that is generated by the sampling method). The number of samples with the HR algorithm was increased up to 5000 to check possible effects of the sample size on the standard deviation. Only small increases were observed for the standard deviations of the studied fluxes. Using the CB algorithm we obtain higher standard deviations, and for most reactions the real flux is less than one standard deviation away from the mean flux. We can therefore conclude that the CB sampling method gives more realistic standard deviations for the fluxes. This is important if we want to compare the significance of flux changes between conditions. An underestimated standard deviation would make some flux changes appear significant even though they may not be in reality, and our method therefore gives a more conservative list of significantly changed reaction fluxes than the HR algorithm. To evaluate our method we used data for the yeast S. cerevisiae. Data from growth on four different carbon sources (glucose, maltose, ethanol and acetate) in chemostat cultures and five deletion mutants (grr1Δ, hxk2Δ, mig1Δ, mig1Δmig2Δ and gdh1Δ) grown in batch cultures were used. The exchange fluxes and gene expression data for the mentioned conditions have been published earlier [15]–[17]. 
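To illustrate how such significance scores can be computed from the sampled flux distributions, the following sketch derives per-reaction Z-scores and two-sided p-values from two sets of samples (one per condition). It is a minimal illustration in Python, not the authors' implementation; the array shapes, variable names and the use of NumPy/SciPy are assumptions.

import numpy as np
from scipy.stats import norm

def flux_change_scores(samples_a, samples_b):
    # samples_a, samples_b: arrays of shape (n_samples, n_reactions)
    # holding sampled flux distributions for condition A and condition B.
    mean_a, mean_b = samples_a.mean(axis=0), samples_b.mean(axis=0)
    sd_a = samples_a.std(axis=0, ddof=1)
    sd_b = samples_b.std(axis=0, ddof=1)
    # The variance of the difference is the sum of the two variances.
    sd_diff = np.sqrt(sd_a ** 2 + sd_b ** 2)
    z = (mean_a - mean_b) / sd_diff
    # Two-sided p-value from the cumulative Gaussian distribution.
    p = 2.0 * (1.0 - norm.cdf(np.abs(z)))
    return z, p

# Hypothetical usage with 500 sampled flux distributions per condition:
# z, p = flux_change_scores(samples_glucose, samples_ethanol)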
Our method obtains probability scores for each enzyme in the metabolic network (see methods), and this allowed us to classify the enzymes as transcriptionally regulated (correlation between flux and gene expression), post-transcriptionally regulated (changes in gene expression do not cause changes in flux) and metabolically regulated (changes in flux are not caused by changes in gene expression). The cut-off chosen for this classification was a probability score above 0.95. Tables 2 and 3 show the 10 top scoring enzymes in each group (or fewer when fewer than 10 enzymes had a score exceeding 0.95). The method is illustrated in Fig. 3. The method to identify the significance of flux changes relies on a set of measured external fluxes, and in some cases strains that do not show significant changes in external fluxes have changes in internal fluxes [18]. These changes cannot be identified with our method, and our estimations of the significance of flux changes can therefore be seen as conservative estimates. The lists of transcriptionally and metabolically regulated reactions are therefore more reliable than the list of post-transcriptionally regulated reactions (in which some fluxes may be changed in reality but their change passes undetected). The reactions showing transcriptional regulation form a set of putative targets where enzyme over-expression or down-regulation will influence the flux through these reactions. The reactions showing metabolic regulation point to parts of the metabolism where the pools of metabolites are possibly increasing or decreasing in connection with transcriptional changes and hereby counteracting possible changes in enzyme concentration as a result of transcriptional changes. This knowledge can be used to identify whether one should target changes in enzyme concentration (vmax changes), e.g. through over-expression, or changes in enzyme affinity (Km changes), e.g. through expression of heterologous enzymes, in order to alter the fluxes. The steady state condition and the irreversibility of some reactions impose limitations on the flux distributions attainable by the cell [18]. The set of feasible solutions can be further constrained by fixing some fluxes to their experimental values. In general, the fluxes most accessible to experimental determination are those corresponding to uptake or secretion rates. After fixing a subset of fluxes, genome-scale models still have a large number of degrees of freedom. In this study we used the genome-scale model iFF708 for S. cerevisiae [27]. Random sampling has previously been performed [7] by enclosing the region of allowed solutions in a parallelepiped with the same dimensions as the solution space (the null space of the stoichiometric matrix) and generating random points inside this parallelepiped. The points that lie inside the region of possible solutions are then selected. The COBRA Toolbox [12] uses a Hit and Run algorithm to generate random points in this way. In this work, instead of sampling inside the region of allowed solutions, we sampled at its corners. In order to obtain corners in the space of allowed solutions we used the simplex method with a random set of objective functions to be maximized. The maximization of each of these objective functions will give a corner in the space of solutions. The constraints imposed upon each optimization are: (1) $S \cdot v = 0$ (steady state for the internal metabolites), (2) $v_{min,i} \leq v_i \leq v_{max,i}$ (irreversibility and capacity bounds), and (3) $v_j = v_{exp,j}$ for the experimentally measured fluxes. The values of the measured fluxes ($v_{exp}$) are different between conditions. 
This fact changes the shape of the region of feasible solutions between different conditions. S is the stoichiometric matrix of the network. In order to reduce the effects of internal loops, we first identified all the reactions that can become involved in loops using the FVA (Flux Variability Analysis) option in the COBRA Toolbox. The reactions that can be involved in loops are unbounded and show the default maximal or minimal value set in the COBRA Toolbox (1000 or −1000). If these bounds were kept, the means and standard deviations for these reactions would be unrealistic [6] and could not be used for further analysis. The default maximal and minimal fluxes for the reactions involved in loops were therefore set to smaller values. To select an appropriate value, the bounds were increased from 0 in steps of 0.1 until the minimal value allowing flux distributions consistent with the experimental fluxes was found. These values went from 1 to 15 mmol h−1 g-DW−1 depending on the condition. Also, no weights (eq. 4) were assigned to the reactions involved in loops, in order to avoid objective functions that maximize the activity of loops. Random objective functions were generated by selecting random pairs of reactions and assigning them random weights (the reactions involved in loops were excluded from these choices). The weights ($w_i$) assigned to each reaction were generated by dividing a random number between 0 and 1 by the maximal flux for this reaction obtained using FVA. This normalization was made to account for the different orders of magnitude of the different reactions. The objective functions take the form: (4) $Z_{obj} = w_i v_i + w_j v_j$, where $i$ and $j$ are the two randomly chosen reactions. One solution is obtained for each of the objective functions generated. Our objective is to obtain means and standard deviations for each flux in each of the compared conditions and use them to get a Z-score quantifying the significance of change in each flux between the considered conditions. This score is equal to the difference between the means in each of the conditions divided by the standard deviation of this difference (note that the variance of the difference is the sum of the two variances and the standard deviation is its square root): (5) $Z = (\bar{v}_A - \bar{v}_B)/\sqrt{\sigma_A^2 + \sigma_B^2}$. The difference between averages in the numerator follows a normal distribution (according to the central limit theorem) with a standard deviation equal to the standard deviation of the flux (the denominator in eq. (5)) divided by the square root of the number of samples. Therefore, Z itself follows a normal distribution with a standard deviation equal to the inverse of the square root of the number of samples. The Z score measures the significance of change in terms of standard deviations. If the error in the Z score is lower than 0.15, no information is lost in terms of classifying a reaction as significantly changed or not. A genome-scale model contains on the order of 1000 reactions. A reasonable accuracy for the Z-scores is therefore to expect an error higher than 0.15 for only 1 reaction in the whole model, which corresponds to a probability of 0.001. If we want to keep the error on the Z score under 0.15 with a probability of 0.999, we need 500 samples, and this was therefore selected as the sampling number. The Z-scores can be transformed into probabilities of change by using the cumulative Gaussian distribution. 
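As a concrete illustration of the corner-sampling procedure outlined in the last two paragraphs, the sketch below maximizes random two-reaction objectives subject to the steady-state, bound and measured-flux constraints using a generic linear-programming solver. It is not the authors' implementation (which used the simplex method within the COBRA Toolbox and the iFF708 model); the use of SciPy's linprog, the placeholder inputs and the way measured fluxes are fixed through the bounds are assumptions made for illustration.

import numpy as np
from scipy.optimize import linprog

rng = np.random.default_rng(0)

def sample_convex_basis(S, lb, ub, fixed, fva_max, loop_rxns, n_samples=500):
    # S         : stoichiometric matrix (metabolites x reactions)
    # lb, ub    : lower/upper flux bounds (irreversibility, loop caps)
    # fixed     : dict {reaction index: measured flux value}
    # fva_max   : maximal flux per reaction from FVA (scales the weights)
    # loop_rxns : indices of reactions that can take part in loops (never weighted)
    n_rxns = S.shape[1]
    bounds = [(fixed.get(i, lb[i]), fixed.get(i, ub[i])) for i in range(n_rxns)]
    candidates = [i for i in range(n_rxns) if i not in loop_rxns and fva_max[i] > 0]
    samples = []
    while len(samples) < n_samples:
        i, j = rng.choice(candidates, size=2, replace=False)
        c = np.zeros(n_rxns)
        # Random weights divided by the FVA maxima; negated because linprog minimizes.
        c[i] = -rng.random() / fva_max[i]
        c[j] = -rng.random() / fva_max[j]
        res = linprog(c, A_eq=S, b_eq=np.zeros(S.shape[0]), bounds=bounds,
                      method="highs")
        if res.success:
            samples.append(res.x)  # one corner of the feasible region
    return np.array(samples)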
Once we have Z-scores for the significance of flux changes and Z-scores for the significance of gene-expression changes, we can obtain probabilities of having correlated expression and flux changes for each enzyme. An increase in enzyme expression can result in an increase of flux (transcriptional regulation). In order to evaluate the probability that a reaction is transcriptionally regulated, we multiply the probability of its enzyme level changing by the probability of its flux changing in the same direction, both obtained using the cumulative normal distribution: (6) $P_{tr,i} = P_{dir}(\Delta expr_i) \cdot P_{dir}(\Delta flux_i)$, where (7) $P_{dir}(\Delta x) = \Phi(|Z_x|)$ and $\Phi$ is the cumulative normal distribution. If there is a decrease in expression and a decrease in flux, both Z-scores are negative and we will use the absolute values of the Zs in eq. (6). If there is an increase in expression and a negative flux becomes more negative, we will use the absolute value of the Z-score for the flux change. If the direction of the flux changes between conditions, this change must be driven by the metabolite concentrations and not by transcriptional regulation; therefore a $P_{tr,i}$ of zero is assigned by default. In the same way as in eq. (6), we can define probabilities for the expression level changing and for the flux not changing (post-transcriptional regulation): (8) $P_{pt,i} = P_{any}(\Delta expr_i) \cdot (1 - P_{any}(\Delta flux_i))$, where (9) $P_{any}(\Delta x) = \mathrm{erf}(|Z_x|/\sqrt{2})$. Here we use the error function because we want to evaluate the probability of change in any direction. The absolute value of Z is used in all the cases. The probability of a change in flux but not in transcription (metabolic regulation) can be obtained for each reaction as follows: (10) $P_{met,i} = (1 - P_{any}(\Delta expr_i)) \cdot P_{any}(\Delta flux_i)$. Each of these three probabilities can be associated with each enzyme in the metabolic network. Table 4 summarizes the criteria to assign each type of regulation.
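A minimal sketch of this classification step, assuming the reconstructed forms of eqs. (6)-(10) above, is given below; the function name, the treatment of sign changes and the 0.95 cut-off in the usage note mirror the text and are otherwise illustrative assumptions rather than the authors' code.

import numpy as np
from scipy.special import erf
from scipy.stats import norm

def regulation_probabilities(z_expr, z_flux, flux_sign_changed=False):
    # Probability of a change in any direction (two-sided), as in eq. (9).
    p_expr_change = erf(abs(z_expr) / np.sqrt(2.0))
    p_flux_change = erf(abs(z_flux) / np.sqrt(2.0))
    # Probability of a directed (same-direction) change from the cumulative normal;
    # a flux that reverses direction is assigned zero by default.
    p_tr = 0.0 if flux_sign_changed else norm.cdf(abs(z_expr)) * norm.cdf(abs(z_flux))
    p_pt = p_expr_change * (1.0 - p_flux_change)    # expression changes, flux does not
    p_met = (1.0 - p_expr_change) * p_flux_change   # flux changes, expression does not
    return p_tr, p_pt, p_met

# Hypothetical usage with the 0.95 cut-off used in the text:
# p_tr, p_pt, p_met = regulation_probabilities(z_expr=2.3, z_flux=2.1)
# transcriptionally_regulated = p_tr > 0.95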
The sequencing of full genomes and the development of high-throughput analysis technologies have made available both genome-scale metabolic networks and simultaneous transcription data for all the genes of an organism. Genome-scale metabolic models, with the assumption of steady state for the internal metabolites, allow the definition of a region of feasible metabolic flux distributions. This space of solutions can be further constrained using experimental flux measurements (normally production or uptake rates of external compounds). Here a random sampling method was used to obtain average values and standard deviations for all the reaction rates in a genome-scale model. These values were used to quantify the significance of changes in metabolic fluxes between different conditions. The significance in flux changes can be compared to the changes in gene transcription of the corresponding enzymes. Our method allowed for identification of specific reactions that are transcriptionally regulated, and we further identified that these reactions can be ascribed to a few key transcription factors. This suggests that the regulation of metabolism has evolved to contain a few flux-regulating transcription factors that could be the target for genetic manipulations in order to redirect fluxes.
lay_plos
Write a title and summarize: At least 25 inherited disorders in humans result from microsatellite repeat expansion. Dramatic variation in repeat instability occurs at different disease loci and between different tissues; however, cis-elements and trans-factors regulating the instability process remain undefined. Genomic fragments from the human spinocerebellar ataxia type 7 (SCA7) locus, containing a highly unstable CAG tract, were previously introduced into mice to localize cis-acting “instability elements, ” and revealed that genomic context is required for repeat instability. The critical instability-inducing region contained binding sites for CTCF—a regulatory factor implicated in genomic imprinting, chromatin remodeling, and DNA conformation change. To evaluate the role of CTCF in repeat instability, we derived transgenic mice carrying SCA7 genomic fragments with CTCF binding-site mutations. We found that CTCF binding-site mutation promotes triplet repeat instability both in the germ line and in somatic tissues, and that CpG methylation of CTCF binding sites can further destabilize triplet repeat expansions. As CTCF binding sites are associated with a number of highly unstable repeat loci, our findings suggest a novel basis for demarcation and regulation of mutational hot spots and implicate CTCF in the modulation of genetic repeat instability. Trinucleotide repeat expansion is the cause of at least 25 inherited neurological disorders, including Huntington' s disease (HD), fragile X mental retardation, and myotonic dystrophy (DM1) [1]. One intriguing aspect of trinucleotide repeat disorders is ‘anticipation’ – a phenomenon whereby increased disease severity and decreased age-of-onset are observed as the mutation is transmitted through a pedigree [2]. In spinocerebellar ataxia type 7 (SCA7), for example, disease onset in children, who inherit the expanded repeat, averages 20 years earlier than in the affected parent [3]. The basis of the profound anticipation in SCA7 stems from a significant tendency to undergo large repeat expansions upon parent-to-child transmission [4]. Other similarly-sized, disease-linked CAG/CTG repeat tracts do not exhibit strong anticipation, and are much more stable upon intergenerational transmission, as occurs at the spinobulbar muscular atrophy (SBMA) disease locus [5]. Drastic differences in the stability of CAG/CTG repeats, depending upon the locus at which they reside, strongly support the existence of cis-acting DNA elements that modulate repeat instability at certain loci. Furthermore, dramatic variation in CAG tract instability in tissues from an individual patient, together with disparities in the timing, pattern, and tissue-selectivity of somatic instability between CAG/CTG disorders, indicates a role for epigenetic modification in DNA instability [1], [6]–9. While the existence of cis-elements regulating disease-associated instability is widely accepted, the identities of cis-elements that define the mutability of any repeat are still unknown. Proposed cis-elements that regulate repeat instability include: the sequence of the repeat tract, the length and purity of the repeat tract, flanking DNA sequences, surrounding epigenetic environment, replication origin determinants, trans-factor binding sites, and transcriptional activity [10]–[12]. Such cis-elements may enhance or protect against CAG tract instability. 
To identify cis-elements responsible for CAG expansion at the SCA7 locus, we previously introduced SCA7 CAG-92 repeat expansions into mice, either on 13. 5 kb ataxin-7 genomic fragments or on ataxin-7 cDNAs. Comparison of CAG repeat length change revealed that ataxin-7 genomic context drives repeat instability with an obvious bias toward expansion, while SCA7 CAG repeats introduced on ataxin-7 cDNAs were stable [13]. To localize the cis-acting elements responsible for this instability tendency, we derived lines of transgenic mice based upon the original 13. 5 kb ataxin-7 genomic fragment, deleting a large region (∼8. 3 kb) of human sequence beyond the 3′ end of the CAG tract (α-SCA7-92R construct). As deletion of the 3′ region in the α-SCA7-92R transgenic mice significantly stabilized the CAG-92 tract [13], we hypothesized that cis-elements within this 3′ region modify repeat instability at the SCA7 locus. To identify cis-acting instability elements at the SCA7 locus and the trans-acting proteins that regulate them, we evaluated the critical genomic region 3′ to the CAG repeat for sequences that might regulate genetic instability. In the case of SCA7 and a number of other highly unstable CAG/CTG repeat loci, including HD, DM1, SCA2, and dentatorubral-pallidoluysian atrophy, binding sites for a protein known as CTCF (i. e. the “CCCTC binding factor”) have been found [14]. CTCF is an evolutionarily conserved zinc-finger DNA binding protein with activity in chromatin insulation, transcriptional regulation, and genomic imprinting [15], [16]. As CTCF affects higher order chromatin structure [17], [18], we wondered if CTCF binding at the SCA7 locus might regulate CAG repeat instability. To test this hypothesis, we derived SCA7 genomic fragment transgenic mice with CTCF binding site mutations, and found that impaired CTCF binding yielded increases in both intergenerational and somatic instability at the SCA7 locus. Detection of increased somatic instability in association with hypermethylation of the CTCF binding site indicated a role for epigenetic regulation of SCA7 CAG repeat stability. Our results identify CTCF as an important modifier of repeat instability in SCA7, and suggest that CTCF binding may influence repeat instability at other tandem repeat expansion disease loci. At the SCA7 locus, there are two CTCF binding sites that flank the CAG repeat tract; the CTCF-I binding site is located 3′ to the CAG repeat (Figure S1), within the critical region deleted from the SCA7 genomic fragment in the α-SCA7-92R mice (Figure 1A). As CTCF binding sites are associated with highly unstable repeat loci [14], and CTCF binding can alter chromatin structure and DNA conformation [17], [18], we hypothesized that CTCF binding might be involved in SCA7 repeat instability. To test this hypothesis, we decided to compare SCA7 CAG repeat instability in mice carrying either the wild-type CTCF binding site or a mutant CTCF binding site that would be incapable of binding CTCF. To define the CTCF binding sites, we performed electrophoretic mobility shift assays to confirm that CTCF protein specifically binds to the putative CTCF-I binding site, and we found that both the CTCF DNA binding domain fragment and full-length CTCF protein bind to the SCA7 repeat locus 3′ region (Figure 1B). 
When we mapped the CTCF-I contact regions at the SCA7 repeat locus by methylation interference and DNA footprinting, we defined a region that is protected from DNase I treatment upon CTCF binding and subject to altered CTCF binding upon methylation treatment (Figure 1C). We then introduced point mutations at 11 nucleotides within this 3′ CTCF-I binding site, including eight contact nucleotides contained within the footprinted region (Figure 1C; Figure 1A, bottom). After confirming that CTCF binding was abrogated by these point mutations in electrophoretic mobility shift assays (Figure 1B), we derived a RL-SCA7 94R 13. 5 kb genomic fragment construct, that was identical to our original RL-SCA7 92R genomic fragment construct [13], except for: i) the presence of a mutant CTCF-I binding site, and ii) a minor repeat size increase to 94 CAG repeats. The RL-SCA7 94R CTCF-I-mutant construct was microinjected, and two independent lines of RL-SCA7 94R CTCF-I mutant transgenic mice were generated (hereafter referred to as the SCA7-CTCF-I-mut line mice – to distinguish them from the original RL-SCA7-92R transgenic mice with an intact CTCF-I binding site, hereafter referred to as the SCA7-CTCF-I-wt line mice). To assess in vivo occupancy of the CTCF-I binding site in SCA7-CTCF-I-wt and SCA7-CTCF-I-mut mice, we performed chromatin immunoprecipitation (ChIP) assays. To distinguish between the two CTCF binding sites, separated by a distance of 562 bp, we used two primer sets, including one extending 3′ to the CAG repeat. Quantitative PCR amplification with a primer set (‘A’) within ∼800 bp of the CTCF-I and CTCF-II sites yielded comparable CTCF occupancy in SCA7-CTCF-I-wt and -mut mice. As most sheared DNA fragments isolated by ChIP exceed 1 kb, intact CTCF-II sites and the primer set ‘A’ amplicon will be present in sheared DNA fragments isolated by ChIP from SCA7-CTCF-I-wt and -mut mice, accounting for comparable CTCF occupancy with primer set A. However, a significant reduction in CTCF occupancy at the CTCF-I site was observed in the SCA7-CTCF-I-mut mice for primer set B, which is closer to the CTCF-I binding site (at a distance of ∼700 bp) than the CTCF-II binding site (at a distance of ∼1,200 bp, thereby exceeding the size of most sheared DNA fragments isolated by ChIP) (Figure 1D; p = 0. 02, one-way ANOVA). Thus, ChIP analysis indicated that in vivo CTCF-I occupancy is significantly diminished in the cerebellum of SCA7-CTCF-I-mut mice. We assessed intergenerational repeat length instability in 3 month-old SCA7-CTCF-I-wt and SCA7-CTCF-I-mut mice by PCR amplification of the CAG repeat from tail DNAs, and found that mutation of the CTCF-I site destabilized the CAG repeat during intergenerational transmission (p = 0. 002, Mann-Whitney two-tailed test) (Figure 2A). Increased intergenerational instability in the SCA7-CTCF-I-mut mice was reflected by a broader range of repeat length change, as mean expansion and deletion sizes were greater for SCA7-CTCF-I-mut mice in comparison to SCA7-CTCF-I-wt mice (+4. 4 CAG' s/−4. 7 CAG' s vs. +2. 6 CAG' s/−2. 0 CAG' s). Analysis of repeat length instability between the two SCA7-CTCF-I-mut lines revealed similar intergenerational repeat instability (p = 0. 93, chi-square), and there was no difference in expansion bias between the two lines (p = 0. 25, chi-square). Thus, the SCA7-CTCF-I-mut mice did not show integration site effects, suggesting that increased instability in the two lineages results from altered CTCF binding. 
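The significance values quoted in this section (Mann-Whitney and chi-square tests) could be computed from per-offspring repeat-length changes with standard statistical routines. The sketch below is purely illustrative; the repeat-length changes and expansion/contraction counts are hypothetical numbers, and only the choice of tests mirrors the text.

import numpy as np
from scipy.stats import mannwhitneyu, chi2_contingency

# Hypothetical CAG repeat-length changes upon intergenerational transmission.
wt_changes = np.array([+2, -1, +3, 0, -2, +4, +1, -3])    # SCA7-CTCF-I-wt offspring
mut_changes = np.array([+6, -5, +8, -4, +3, -7, +5, +9])  # SCA7-CTCF-I-mut offspring

# Two-tailed Mann-Whitney test for a difference in instability between genotypes.
u_stat, p_value = mannwhitneyu(np.abs(wt_changes), np.abs(mut_changes),
                               alternative="two-sided")

# Chi-square test comparing expansion/contraction counts between two mutant lines.
counts = np.array([[12, 7],    # line 1: expansions, contractions
                   [11, 8]])   # line 2: expansions, contractions
chi2, p_lines, dof, expected = chi2_contingency(counts)

print(f"Mann-Whitney p = {p_value:.3g}, chi-square p = {p_lines:.3g}")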
We then assessed germ line repeat instability by small-pool PCR of individual alleles in sperm DNAs from mice at age 2 months and 16 months (Figure 2B–C). As the mice aged, the CAG repeat in SCA7-CTCF-I-mut mice became increasingly unstable (p = 0. 009, Mann-Whitney two-tailed test), as mean expansion and deletion sizes were significantly greater for 16 month-old SCA7-CTCF-I-mut mice in comparison to SCA7-CTCF-I-wt mice (+24. 3 CAG' s/−15. 5 CAG' s (mut) vs. +9. 2 CAG' s/−1. 0 CAG (wt) ). Increasing CAG repeat instability with aging in SCA7-CTCF-I-mut mice suggests a role for CTCF in DNA instability during spermatogenesis, or for the male germ line-restricted CTCF-like paralogue (CTCFL), also known as brother of the regulator of imprinted sites, or ‘BORIS’ [19]. A potential role for CTCFL/BORIS in male germ line instability in the SCA7-CTCF-I-mut mice is plausible, as mutation of the SCA7-CTCF-I site also prevented binding of CTCFL/BORIS in electrophoretic mobility shift assays (Figure S2). Another intriguing feature of repeat instability is variation in repeat size within and between the tissues of an individual organism. This tissue-specific instability, or “somatic mosaicism”, occurs in human patients with repeat diseases, and in mouse models of repeat instability and disease [1], [8], [11]. While shown to be age-dependent, the mechanistic basis of inter-tissue variation, which even occurs in postmitotic neurons [20], is unknown. To determine if somatic CAG mosaicism at the SCA7 locus involves CTCF binding, we surveyed repeat instability in various tissues from SCA7-CTCF-I-wt and SCA7-CTCF-I-mut mice. At two months of age, the SCA7 CAG repeat was remarkably stable in all analyzed tissues (Figure 3A). However, by ∼10 months of age, SCA7-CTCF-I-wt and SCA7-CTCF-I-mut mice displayed large CAG repeat expansions in the cortex and liver (Figure 3B). The liver also exhibited a bimodal distribution of repeat size (i. e. two populations of cells with distinct tract lengths) (Figure 3B). The most pronounced somatic instability differences existed in the kidney, with large expansions for SCA7-CTCF-I-mut mice, but stable repeats in the SCA7-CTCF-I-wt mice (Figure 3B). This pattern of increased kidney and liver repeat instability was present in both SCA7-CTCF-I-mut transgenic lines (Figure 3B; Figure S3). Indeed, comparable somatic instability was also detected in both SCA7-CTCF-I-mut transgenic lines at five months of age (Figure S4). When we closely examined repeat instability in the cortex by small-pool PCR, we observed significantly different repeat sizes (p = 8. 6×10−5, Mann-Whitney), with a range of 39 to 152 CAG repeats in SCA7-CTCF-I-wt mice and 26 to 245 CAG repeats in SCA7-CTCF-I-mut mice (Figure 3C; Table 1). The increased somatic instability occurred in both SCA7-CTCF-I-mut transgenic lines, as an expansion bias was apparent in both lineages upon small-pool PCR analysis (Figure 3D; Table 1). These findings suggest that CTCF binding stabilizes the SCA7 CAG repeat in certain tissues. Thus, as noted for the germ line and documented for two independent lines of SCA7-CTCF-I-mut transgenic mice, SCA7 somatic CAG instability is dependent upon age and the presence of intact CTCF binding sites. CTCF binding can be regulated by CpG methylation, as methylation at CTCF recognition sites abrogates binding [16]. This finding was confirmed for un-methylated and methylated versions of the SCA7 CTCF-I recognition site (Figure 4A; Figure S5). 
Highly variable levels of instability have been documented in the kidneys of transgenic repeat instability mouse models [21], [22], although the reasons for pronounced instability in this tissue are unknown. Interestingly, one mouse with a wild-type CTCF-I binding site (SCA7-CTCF-I-wt) displayed marked CAG repeat instability in its kidney DNA (Figure 4B), paralleling the considerable instability observed in the SCA7-CTCF-I-mut mice (Figure 3B). Bisulfite sequencing of kidney DNA from this SCA7-CTCF-I-wt mouse revealed high levels of CpG methylation at the wild-type CTCF-I binding site, including the central CTCF contact site (Figure S6); whereas methylation was not observed in kidney DNAs from 14 other SCA7-CTCF-I-wt mice that displayed only modest levels of CAG instability (Figure 4C). The high levels of CAG instability and the CpG methylation in this mouse were restricted to the kidney, as the cerebellum and tail DNAs of the same mouse, which showed limited CAG instability (Figure 4B), were completely unmethylated (Figure 4C). This finding suggests a direct link between methylation status of the CTCF binding site and CAG repeat instability. Of all the tissues analyzed from SCA7-CTCF-I-wt mice, liver exhibits the greatest amount of somatic mosaisicm, with the largest repeat expansions (Figure 3B). We hypothesized that the high levels of CAG repeat instability in the liver of SCA7-CTCF-I-wt mice might result from methylation of the CTCF-I binding site. To address this question, we performed bisulfite sequencing analysis of liver DNAs from SCA7-CTCF-I-wt mice, and documented moderately high levels of methylation at the CTCF-I binding site (Figure 4D; Figure S7). These results indicate a correlation between CpG methylation and CAG repeat instability. Thus, in SCA7 transgenic mice, decreased CTCF binding, either by CpG methylation or mutagenesis of the CTCF-I binding site, enhanced CAG repeat instability. We have identified a CTCF binding site as the first cis-element regulating CAG tract instability at a disease locus. Furthermore, binding of the trans-factor CTCF to this cis-element influences CAG instability, and this interaction is epigenetically regulated. At the SCA7 locus and four other CAG/CTG repeat loci known to display pronounced anticipation, functional CTCF binding sites occur immediately adjacent to the repeats, and CTCF binding can affect DNA structure and chromatin packaging at such loci, and elsewhere [14], [23]–[26]. Although an interplay between GC-content, CpG islands, epigenetic modification, chromatin structure, repeat length, and unusual DNA conformation has long been postulated to underlie trinucleotide repeat instability [11], [27]–[29], the mechanistic basis of this process is ill-defined. CTCF insulator and genomic imprinting functions are subject to epigenetic regulation, as methylation status is a key determinant of CTCF action at certain “differentially methylated domains” and methylation changes at CTCF binding sites are linked to oncogenic transformation [16], [18]. At the SCA7 locus, methylation status of the CTCF-I binding site may be similarly important for its ability to tamp down repeat instability, as hypermethylation of the CTCF-I site was associated with a dramatic enhancement of somatic instability in the SCA7 genomic fragment transgenic mouse model. 
Thus, inability to bind CTCF at sites adjacent to CAG tracts, because of binding site mutation or CpG methylation in the case of the SCA7-CTCF-I site, can promote further expansion of disease-length CAG repeat alleles (Figure 5). In both human patients and transgenic mice with expanded repeat tracts, the repeat displays high levels of instability. The flanking sequence has been thought to contain elements that may protect or enhance repeat instability. Our results show that CTCF binding is a stabilizing force at the SCA7 repeat locus, suppressing expansion of the CAG repeat in the germ line and soma. Interestingly, deletion of ∼8. 3 kb of 3′ genomic sequence in our previous SCA7 transgenic mouse, including the CTCF-I site, stabilized the repeat [13]. The CAG-92 stabilization, arising from the ∼8. 3 kb 3′ genomic fragment deletion, suggests the existence of positive cis-regulators that were “driving” CAG instability. One such element could be a replication initiation site that was mapped within the genomic region 3′ to the CTCF-I binding site at the SCA7 locus [30]. Hence, the 8. 3 kb 3′ deletion could grossly alter the chromatin organization of the adjacent repeat, and would likely ablate replication origin activity, stabilizing the CAG repeat tract. However, this ∼8. 3 kb genomic region likely also contained negative cis-regulators of CAG repeat instability, whose dampening effects would not be apparent due to the coincident loss of instability drivers. Our results indicate that CTCF binding negatively regulates expanded CAG repeat instability at the SCA7 locus. CTCF regulation of repeat instability potential is consistent with its many roles in modulating DNA structure. CTCF can mediate long-range chromatin interactions and can co-localize physically distant genomic regions into discrete sub-nuclear domains [17], [18]. CTCF insulates heterochromatin and silenced genes from transcriptionally active genes, as CTCF binding sites occur at transition zones between X-inactivation regions and genes that escape from X-inactivation [24]. CTCF has been implicated in genomic imprinting, although recent studies indicate that such transcription insulator events may involve the coordinated action of CTCF with cohesin [31]–[33]. CTCF binding at the DM1 locus sequesters repeat-driven heterochromatin formation to the immediate repeat region, while repeat expansion-induced loss of CTCF binding may permit spreading of heterochromatin to adjacent genes, accounting for the mental retardation phenotype in congenital DM1 [23]. As DNA structural conformation and transcription activity are two highly intertwined processes that appear fundamental to the instability of expanded tandem repeats [10], [11], CTCF appears a likely candidate for modulation of trinucleotide repeat instability. At the SCA7 locus, a pronounced tendency for repeat expansion has been associated with transmission through the male germ line [3], [4], [34]. Although we have hypothesized that CTCF is principally responsible for modulating SCA7 CAG repeat instability both in the germ line and in the soma, we considered a possible role for the related CTCF-like factor BORIS. BORIS and CTCF share identical 11 zinc-finger domains for DNA binding [19]; hence, both CTCF and BORIS can bind to the CTCF binding sites at the SCA7 locus. Upon mutation or methylation of the CTCF binding site 3′ to the SCA7 CAG repeat, neither CTCF nor BORIS can bind (Figure 1C; Figure 4A; Figure S8). 
As BORIS can bind to the H19 differentially methylated domain even when it is methylated [35], our results suggest that the methylation dependence of BORIS binding is locus specific. BORIS and CTCF expression patterns overlap very little, if at all, and in the male germ line, BORIS appears restricted to primary spermatocytes, while CTCF occurs almost exclusively in post-meiotic cells, such as round spermatids [19]. Interestingly, neither BORIS nor CTCF could be detected by immunostaining proliferating spermatogonia. In human HD patients and transgenic mouse models of CTG/CAG instability, large repeat expansions have been documented in spermatogonia, but not in post-meiotic spermatids or spermatozoa [36]–[39]. Thus, absence or low levels of BORIS or CTCF in spermatogonia — the cells in which the largest and most frequent repeat expansions occur — may contribute to the paternal parent-of-origin expansion bias common to most CAG/CTG repeat diseases. In spermatocytes, BORIS may stabilize expanded CAG repeats, just as CTCF binding appears to promote repeat stability in somatic tissues. Thus, in the SCA7-CTCF-I-mut mice, abrogated binding of BORIS may contribute to increased repeat instability and expansion bias in the male germ line. Our findings suggest that CTCF is a trans-acting factor that specifically interacts in a methylation-dependent manner with the adjacent cis-environment to prevent hyper-expansion of disease length CAG repeats. In a Drosophila model of polyglutamine repeat disease, expression of the mutant gene product modulated repeat instability by altering transcription and repair pathways [10]. Similarly, uninterrupted repeat sequences, and in particular, runs of CG-rich trinucleotide repeats, can affect replication machinery, DNA repair pathways, and nucleosome positioning, though in cis, by altering the structure and conformation of the DNA regions within which they reside [40], [41]. Association of adjacent CTCF binding sites with repeat loci is a common feature of unstable microsatellite repeats [14]. We propose that acquisition of CTCF binding sites at mutational hot spots represents an evolutionary strategy for insulating noxious DNA sequences [42], and our findings indicate that CTCF binding site utilization at a mutational hot spot is subject to epigenetic regulation. We thus envision a predominant role for CTCF in modulating genetic instability at DNA regions containing variably-sized repeats, unstable sequence motifs, or other repetitive sequence elements. To derive the SCA7-CTCF-I-mut transgenic construct, we synthesized a PCR primer with randomly mutated nucleotides introduced at the CTCF-I contact sites for recombineering into the RL-SCA7-92R (SCA7-CTCF-I-wt) construct [13], and then confirmed loss of CTCF binding by the mutated fragment by electrophoretic mobility shift assay (protocol provided below). Using a standard recombineering approach [43], we PCR-generated a SCA7-CTCF-I targeting cassette containing a Chloramphenicol resistance gene and Cla I restriction site flanked by SCA7-CTCF-I region sequences with the following primer set: hSCA7-wt-CAM-F, 5′-tcccccctgcccccctcctgtatcgatgtttaagggcaccaataactgc-3′ & hSCA7-mut-CAM-R, 5′-catctctgcccctcgatttttatcgatatcgataatgatgagcacttttcgaccg-3′. After recombineering the SCA7-CTCF-I-mut targeting cassette into the SCA7-CTCF genomic fragment carried on a plasmid, selection, and PCR screening, we deleted the Chloramphenicol gene by Cla I digestion and ligation. 
We verified the sequence of the SCA7-CTCF-I-mut construct prior to linearization with Sal I – Spe I digestion, gel purification, and microinjection into C57BL/6J×C3H/HeJ oocytes. Transgene-positive founders were backcrossed onto the C57BL/6J background for more than 12 generations to yield incipient congenic mice before repeat instability analysis commenced. All experiments and animal care were performed in accordance with the University of Washington IACUC guidelines. We amplified a 161 bp DNA fragment (SCA7-CTCF-I) from the SCA7 locus with primers (5′-ctccccccttcaccccctcgagac-3′ & 5′-gtgacgcacactcacgcacgcacgg-3′) labeled at their 5′ ends by γ-32P-ATP. We gel-purified the 5′ end-labeled fragment, and used it for electrophoretic mobility shift assays, with in vitro translated proteins, as previously described [14]. We synthesized the CTCF-11 zinc finger (ZF) DNA binding domain, full length CTCF and full length CTCFL/BORIS proteins using the pCITE-11ZF, pCITE-7. 1, and pCITE-BORIS expression constructs [14], [19], [44], with the TnT reticulocyte lysate coupled in vitro transcription-translation system (Promega). For “super-shifts”, we used an anti-CTCF antibody (Upstate Biotechnology) or anti-BORIS antibody [19], [44]. We methylated the end-labeled SCA7-CTCF-I fragment with Sss I methyl-transferase (New England Biolabs) in the presence of 0. 8 mM S-adenosylmethionine. We confirmed the methylation status by restriction enzyme digestion with Nru I, and used unmethylated fragment as a control [14]. We PCR-amplified the SCA7-CTCF-I fragment and labeled it at the 5′ end on either the coding or anti-sense strand, incubated the purified probes with CTCF and then partially digested them with DNase I, or partially methylated them at guanine residues with dimethyl sulfate, and then incubated them with CTCF. Details of these protocols, as well as our methods for isolation and analysis of free probe DNA fragments on sequencing gels, have been described [14]. Bisulfite treatment of tissue DNAs was done as previously described [45], and PCR primers spanning the SCA7-CTCF-I region were designed so that they excluded CpG dinucleotides within the binding region. PCR products were then cloned into a Topo TA vector and sequenced. Sequencing of positive control samples, treated with Sss I to methylate all cytosines in CpG dyads, were included in every run, and revealed lack of C to T conversion at all CpG dyads in all control samples analyzed. We prepared tissues, cross-linked proteins to DNA, and processed tissue samples essentially as we have done previously [46]. However, we doubled the length of the sonication step, and, prior to immunoprecipitation, we fractionated supernatant DNAs on agarose gels to gauge the extent of shearing. After confirming that the bulk of sheared DNAs migrated in the 500–1,000 bp range, we performed immunoprecipitation with an anti-CTCF antibody (Upstate Biotechnology), as described [14]. DNAs were isolated and then subjected to real-time qPCR analysis with different SCA7 genomic region primer and probe sets (available upon request) on an ABI-7700 sequence detection system. For each CTCF ChIP sample, we normalized SCA7 locus occupancy to a control region of the Myc locus lacking CTCF binding sites [14]. All primer and probe sequence sets are available upon request. We PCR-amplified the SCA7 CAG repeat from genomic DNA samples in the presence of 0. 1µCi of α-32P-ATP, and resolved the radiolabeled PCR products on 1. 8% agarose gels [13]. 
For small-pool PCR, dilution of genomic DNA' s, yielding 1–5 genome equivalents, was performed prior to amplification and sizing [4]. In all experiments, at least three mice/genotype, or three samples/time point, were analyzed. All primer sequences are available upon request.
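For the ChIP experiments described in the methods above, occupancy at the SCA7 locus was normalized to a Myc control region lacking CTCF sites. The text does not spell out the arithmetic, so the sketch below assumes a simple delta-Ct fold-enrichment scheme with hypothetical Ct values; it is one common way such a normalization can be expressed, not the study's actual analysis.

import numpy as np

def relative_occupancy(ct_target_ip, ct_control_ip):
    # Fold enrichment of a target amplicon over a control amplicon in one ChIP sample.
    # Assumes perfect amplification efficiency (a factor of 2 per cycle); the actual
    # normalization used in the study is not specified in the text.
    return 2.0 ** (ct_control_ip - ct_target_ip)

# Hypothetical triplicate Ct values for primer set B (near CTCF-I) and the Myc control.
ct_sca7_wt = np.array([26.1, 26.3, 25.9])
ct_myc_wt = np.array([28.4, 28.6, 28.3])
ct_sca7_mut = np.array([28.0, 28.2, 27.9])
ct_myc_mut = np.array([28.5, 28.4, 28.6])

occ_wt = relative_occupancy(ct_sca7_wt, ct_myc_wt).mean()
occ_mut = relative_occupancy(ct_sca7_mut, ct_myc_mut).mean()
print(f"CTCF occupancy (wt vs mut): {occ_wt:.2f} vs {occ_mut:.2f}")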
The human genome contains many repetitive sequences. In 1991, we discovered that excessive lengthening of a three-nucleotide (trinucleotide) repeat sequence could cause a human genetic disease. We now know that this unique type of genetic mutation, known as a "repeat expansion," occurs in at least 25 different diseases, including inherited neurological disorders such as the fragile X syndrome of mental retardation, myotonic muscular dystrophy, and Huntington' s disease. An interesting feature of repeat expansion mutations is that they are genetically unstable, meaning that the repeat expansion changes in length when transmitted from parent to offspring. Thus, expanded repeats violate one major tenet of genetics-i. e., that any given sequence has a low likelihood for mutation. For expanded repeats, the likelihood of further mutation approaches 100%. Understanding why expanded repeats are so mutable has been a challenging problem for genetics research. In this study, we implicate the CTCF protein in the repeat expansion process by showing that mutation of a CTCF binding site, next to an expanded repeat sequence, increases genetic instability in mice. CTCF is an important regulatory factor that controls the expression of genes. As binding sites for CTCF are associated with many repeat sequences, CTCF may play a role in regulating genetic instability in various repeat diseases-not just the one we studied.
lay_plos
Summarize: Background Interoperable communications is not an end in itself. Rather, it is a necessary means for achieving an important goal—the ability to respond effectively to and mitigate incidents that require the coordinated actions of first responders, such as multi-vehicle accidents, natural disasters, or terrorist attacks. Public safety officials have pointed out that needed interoperable communications capabilities are based on whether communications are needed for (1) “mutual-aid responses” or routine day- to-day coordination between two local agencies; (2) extended task force operations involving members of different agencies coming together to work on a common problem, such as the 2002 sniper attacks in the Washington, D.C. metropolitan area; or (3) a major event that requires response from a variety of local, state, and federal agencies, such as major wildfires, hurricanes, or the terrorist attacks of September 11, 2001. A California State official with long experience in public safety communications breaks the major event category into three separate types of events: (1) planned events, such as the Olympics, for which plans can be made in advance; (2) recurring events, such as major wildfires and other weather events, that can be expected every year and for which contingency plans can be prepared based on past experience; and (3) unplanned events, such as the September 11th attacks, that can rapidly overwhelm the ability of local forces to handle the problem. Interoperable communications are but one component, although a key one, of an effective incident command planning and operations structure. As shown in figure 1, determining the most appropriate means of achieving interoperable communications must flow from an comprehensive incident command and operations plan that includes developing an operational definition of who is in charge for different types of events and what types of information would need to be communicated (voice, data, or both) to whom under what circumstances. Other steps include: defining the range of interoperable communications capabilities needed for specific types of events; assessing the current capabilities to meet these communications needs; identifying the gap between current capabilities and defined requirements; assessing alternative means of achieving defined interoperable developing a comprehensive plan—including, for example, mutual aid agreements, technology and equipment specifications, and training—for closing the gap between current capabilities and identified requirements. Interoperable communications requirements are not static, but change over time with changing circumstances (e.g., new threats) and technology (e.g., new equipment), and additional available broadcast spectrum. Consequently, both a short- and long-term “feedback loop” that incorporates regular assessments of current capabilities and needed changes is important. In addition, the first responder community is extensive and extremely diverse in size and the types of equipment in their communications systems. According to SAFECOM officials, there are over 2.5 million public safety first responders within more than 50,000 public safety organizations in the United States. Local and state agencies own over 90 percent of the existing public safety communications infrastructure. This intricate public safety communications infrastructure incorporates a wide variety of technologies, equipment types, and spectrum bands. 
In addition to the difficulty that this complex environment poses for federal, state, and local coordination, 85 percent of fire personnel, and nearly as many emergency management technicians, are volunteers with elected leadership. Many of these agencies are small and do not have technical expertise; only the largest of the agencies have engineers and technicians. In the past, a stovepiped, single jurisdiction, or agency-specific communication systems development approach prevailed—resulting in none or less than desired interoperable communications systems. Public safety agencies have historically planned and acquired communications systems for their own jurisdictions without concern for interoperability. This meant that each state and local agency developed communications systems to meet their own requirements, without regard to interoperability requirements to talk to adjacent jurisdictions. For over 15 years, the federal government has been concerned with public safety spectrum issues, including communications interoperability issues. A variety of federal departments and agencies have been involved in efforts to define the problem and to identify potential solutions, such as the Department of Homeland Security (DHS), the Department of Justice (DOJ), the Federal Communications Commission (FCC), and the National Telecommunications and Information Agency (NTIA) within the Department of Commerce (DOC), among others. Today, a combination of federal agencies, programs, and associations are involved in coordinating emergency communications. DHS has several agencies and programs involved with addressing first responder interoperable communication barriers, including the SAFECOM program, the Federal Emergency Management Agency (FEMA), and the Office for Domestic Preparedness (ODP). As one of its 24 E-Gov initiatives, the Office of Management and Budget (OMB) in 2001 created SAFECOM to unify the federal government’s efforts to help coordinate the work at the federal, state, local, and tribal levels to establish reliable public safety communications and achieve national wireless communications interoperability. The SAFECOM program was brought into DHS in early 2003. In June 2003, SAFECOM partnered with the National Institute of Standards and Technology (NIST) and the National Institute of Justice (NIJ) to hold a summit that brought together over 60 entities involved with communications interoperability policy setting or programs. Several technical factors specifically limit interoperability of public safety wireless communications systems. First, public safety agencies have been assigned frequencies in new bands over time as available frequencies become congested and as new technology made other frequencies available for use. As a result, public safety agencies now operate over multiple frequency bands—operating on these different bands required different radios because technology was not available to include all bands in one radio. Thus, the new bands provided additional capabilities but fragmented the public safety radio frequency spectrum, making communications among different jurisdictions difficult. Another technical factor inhibiting interoperability is the different technologies or different applications of the same technology by manufacturers of public safety radio equipment. One manufacturer may design equipment with proprietary technology that will not work with equipment produced by another manufacturer. 
Nature and Scope of Interoperable Communication Problems Nationwide Are Unknown The current status of wireless interoperable communications across the nation—including the current interoperable communications capabilities of first responders and the scope and severity of the problems that may exist—has not been determined. Although various reports have documented the lack of interoperability of public safety first responders wireless communications in specific locations, complete and current data do not exist documenting the scope and severity of the problem at the local, state, interstate, or federal levels across the nation. Accumulating this data may be difficult, however, because several problems inhibit efforts to identify and define current interoperable communications capabilities and future requirements. First, current capabilities must be measured against a set of requirements for interoperable communications, and these requirements vary according to the characteristics of specific incidents at specific locations. Who needs to talk to whom, when they need to talk, and what set of communications capabilities should be built or acquired to satisfy these requirements depends upon whether interoperable communications are needed for day- to-day mutual aid, task force operations that occur when members of different agencies come together to work on a common problem such as the National Capitol Region sniper investigation, or major events such as a terrorist attack. Requirements for interoperable communications also may change with the expanding definition of first responders—from the traditional police, fire, and emergency medical providers to include such professions as health care providers and other professions—and the evolution of new technology. Establishing a national baseline for public safety wireless communications interoperability will be difficult because the definition of who to include as a first responder is evolving, and interoperability problems and solutions are situation specific and change over time to reflect new technologies and operational requirements. In a joint SAFECOM/AGILE program planning meeting in December 2003, participants agreed that a national baseline is necessary to know what the nation’s interoperability status really is, to set goals, and to measure progress. However, at the meeting, participants said they did not know how they were going to define interoperability, how they could measure interoperability, or how to select their sample of representative jurisdictions; this was all to be determined at a later date. SAFECOM has embarked on an effort to establish a national baseline of interoperable communications capabilities by July 2005, but SAFECOM is still working out the details of the study that would be used to develop the baseline. At the time of our review, SAFECOM officials acknowledged that establishing a baseline will be difficult and said they are working out the details of their baseline study but still expect to complete it by July 2005. DHS also has other work under way that may provide a tool for such self- assessments by public safety officials. An ODP official in the Border and Transportation Security Directorate of DHS said ODP is supporting the development of a communications and interoperability needs assessment for 118 jurisdictions that make up the Kansas City region. The official said the assessment will provide an inventory of communications equipment and identify how the equipment is used. 
He also said the results of this prototype effort will be placed on a CD-Rom and distributed to states and localities to provide a tool to conduct their own self assessments. SAFECOM officials said they will review ODP’s assessment tool as part of a coordinated effort and use this tool if it meets the interoperability requirements of first responders. Second, technical standards for interoperable communications are still under development. Beginning in 1989, a partnership between industry and the public safety user community developed what is known as Project 25 (P- 25) standards. According to the Public Safety Wireless Network (PSWN) program office, Project 25 standards remain the only user- defined set of standards in the United States for public safety communications. DHS purchased radios that incorporate the P-25 standards for each of the nation’s 28 urban search and rescue teams. PSWN believes P-25 is an important step toward achieving interoperability, but the standards do not mandate interoperability among all manufacturers’ systems. Standards development continues today as new technologies emerge that meet changing user needs and new policy requirements. Third, new public safety mission requirements for video, imaging, and high-speed data transfers, new and highly complex digital communications systems, and the use of commercial wireless systems are potential sources of new interoperability problems. Availability of new spectrum can also encourage the development of new technologies and require further development of technical standards. For example, the FCC recently designated a new band of spectrum, the 4.9 Gigahertz (GHz) band, for use and support of public safety. The FCC provided this additional spectrum to public safety users to support new broadband applications such as high- speed digital technologies and wireless local area networks for incident scene management. The FCC requested in particular comments on the implementation of technical standards for fixed and mobile operations on the band. NPSTC has established a task force that includes work on interoperability standards for the 4.9 GHz band. Federal Leadership and Intergovernmental Cooperation Is Needed The federal government, states, and local governments have important roles to play in assessing interoperability needs, identifying gaps in meeting those needs, and developing comprehensive plans for closing those gaps. The federal government can provide the leadership, long-term commitment, and focus to help state and local governments meet these goals. For example, currently national requirements for interoperable communications are incomplete and no national architecture exists, there is no standard database to coordinate frequencies, and no common nomenclature or terminology exists for interoperability channels. States alone cannot develop the requirements or a national architecture, compile the nationwide frequency database, or develop a common nationwide nomenclature. Moreover, the federal government alone can allocate communications spectrum for public safety use. Need to Establish National Requirements and a National Architecture One key barrier to the development of a national interoperability strategy has been the lack of a statement of national mission requirements for public safety—what set of communications capabilities should be built or acquired—and a strategy to get there. 
A key initiative in the SAFECOM program plan for the year 2005 is to complete a comprehensive Public Safety Statement of Requirements. The Statement is to provide functional requirements that define how, when, and where public safety practitioners communicate. On April 26, 2004, DHS announced the release of the first comprehensive Statement of Requirements defining future communication requirements and outlining future technology needed to meet these requirements. According to DHS, the Statement provides a shared vision and an architectural framework for future interoperable public safety communications. DHS describes the Statement of Requirements as a living document that will define future communications services as they change or become new requirements for public safety agencies in carrying out their missions. SAFECOM officials said additional versions of the Statement will incorporate whatever is needed to meet future needs but did not provide specific details. A national architecture has not yet been prepared to guide the creation of interoperable communications. An explicit, commonly understood, and agreed-to blueprint, or enterprise architecture, is required to effectively and efficiently guide modernization efforts. For a decade, GAO has promoted the use of enterprise architectures, recognizing them as a crucial means to a challenging goal—agency operational structures that are optimally defined in both business and technological environments. SAFECOM officials said development of a national architecture will take time because SAFECOM must first assist state and local governments to establish their communications architectures. They said SAFECOM will then collect the state and local architectures and fit them into a national architecture that links federal communications into the state and local infrastructure. Standard Databases and Common Nomenclature Not Yet Established Technology solutions by themselves are not sufficient to fully address communication interoperability problems in a given local government, state, or multi-state region. State and local officials consider a standard database of interoperable communications frequencies to be essential to frequency planning and coordination for interoperability frequencies and for general public safety purposes. Police and fire departments often have different concepts and doctrines on how to operate an incident command post and use interoperable communications. Similarly, first responders, such as police and fire departments, may use different terminology to describe the same thing. Differences in terminology and operating procedures can lead to communications problems even where the participating public safety agencies share common communications equipment and spectrum. State and local officials have drawn specific attention to problems caused by the lack of common terminology in naming the same interoperability frequency. The Public Safety National Communications Council (NCC), appointed by the Federal Communications Commission (FCC) was to make recommendations for public safety use of the 700 MHz communications spectrum. The NCC recommended that the FCC mandate (1) Regional Planning Committee use of a standard database to coordinate frequencies during license applications and (2) specific names be designated for each interoperability channel on all pubic safety bands. 
The NCC said that both were essential to achieve interoperability because public safety officials needed to know what interoperability channels were available and what they were called. In January 2001, the FCC rejected both recommendations. It said that the first recommendation was premature because the database had not been fully developed and tested. The FCC directed the NCC to revisit the issue of mandating the database once the database was developed and had begun operation. The FCC rejected the common nomenclature recommendation because it said that it would have to change the rules each time the public safety community wished to revise a channel label. In its final report of July 25, 2003, the NCC renewed both recommendations. It noted that the FCC had received a demonstration of a newly developed and purportedly operational database, the Computer Assisted Pre-Coordination Resource and Database System (CAPRAD), and that its recommendations were consistent with previous FCC actions, such as the FCC’s designating medical communications channels for the specifc purpose of uniform useage. Converting SAFECOM’s Functions To A Long-Term Program In 2001, the Office of Management and Budget (OMB) established SAFECOM to unify the federal government’s efforts to help coordinate work at the federal, state, local, and tribal levels in order to provide reliable public safety communications and achieve national wireless communications interoperability. However, SAFECOM was established as an OMB E-Gov initiative with a goal of improving interoperable communications within 18-24 months—a timeline too short for addressing the complex, long-term nature of the interoperability problem. In addition, the roles and responsibilities of various federal agencies within and outside DHS involved in communications interoperability have not been fully defined, and SAFECOM’s authority to oversee and coordinate federal and state efforts has been limited in part because it has been dependent upon other federal agencies for cooperation and funding and has operated without signed memorandums of understanding negotiated with various agencies. DHS, where SAFECOM now resides, announced in May 2004 that it had created an Office for Interoperability and Compatibility within the Science and Technology Directorate, to coordinate the federal response to the problems of wireless and other functional interoperability and compatibility. The new office is responsible for coordinating DHS efforts to address interoperability and compatibility of first responder equipment, to include both communications equipment and equipment such as personal protective equipment used by police and fire from multiple jurisdictions. The plan as approved by the Secretary of DHS states that by November 2004 the new office will be fully established and that action plans and a strategy will be prepared for each portfolio (type or class of equipment). The plan presents a budget estimate for creation of the office through November 2004 but does not include costs to implement each portfolio’s strategy. The plans for the new office do not clarify the roles of various federal agencies or specify what oversight authority the new office will have over federal agency communications programs. As of June 2004, the exact structure and funding for the office, including SAFECOM’s role within the office, were still being developed. 
Multiple Federal Agencies Have Roles And Responsibilities For Interoperability DHS has not defined how it will convert the current short-term program and funding structures to a permanent program office structure. When it does, DHS must carefully define the SAFECOM mission and roles in relation to other agencies within DHS and in other federal agencies that have missions that may be related to the OMB-assigned mission for SAFECOM. SAFECOM must coordinate with multiple federal agencies, including ODP within DHS, AGILE and the Office for Community Oriented Policing Services (COPS) in DOJ, the Department of Defense, the FCC, the National Telecommunications and Information Administration within the Department of Commerce, and other agencies. For example, AGILE is the DOJ program to assist state and local law enforcement agencies to effectively and efficiently communicate with one another across agency and jurisdictional boundaries. The Homeland Security Act assigns the DHS Office for Domestic Preparedness (ODP) primary responsibility within the executive branch for preparing the United States for acts of terrorism, including coordinating or, as appropriate, consolidating communications and systems of communications relating to homeland security at all levels of government. An ODP official said the Homeland Security Act granted authority to ODP to serve as the primary agency for preparedness against acts of terrorism, to specifically include communications issues. He said ODP is working with states and local jurisdictions to institutionalize a strategic planning process that assesses and funds their requirements. ODP also plans to develop tools to link these assessments to detailed interoperable communications plans. SAFECOM officials also will face a complex issue when they address public safety spectrum management and coordination. The National Telecommunications and Information Administration (NTIA) within the Department of Commerce is responsible for federal government spectrum use and the FCC is responsible for state, local, and other nonfederal spectrum use. The National Governors’ Guide to Emergency Management noted that extensive coordination will be required between the FCC and the NTIA to provide adequate spectrum and to enhance shared local, state, and federal communications. In September 2002, GAO reported that FCC and NTIA’s efforts to manage their respective areas of responsibility were not guided by a national spectrum strategy and had not implemented long- standing congressional directives to conduct joint, national spectrum planning. The FCC and the NTIA generally agreed with our recommendation that they develop a strategy for establishing a clearly defined national spectrum plan and submit a report to the appropriate congressional committees. In a separate report, we also discussed several barriers to reforming spectrum management in the United States. On June 24, 2004, the Department of Commerce released two reports entitled Spectrum Policy for the 21st Century, the second of which contained recommendations for assessing and managing public safety spectrum. SAFECOM’s Authority To Coordinate Federal And State Efforts Is Limited SAFECOM has limited authority to coordinate federal efforts to assess and improve interoperable communications. Although SAFECOM has developed guidance for use in federal first responder grants, SAFECOM does not have authority to require federal agencies to coordinate their grant award information. 
SAFECOM is currently engaged in an effort with DOJ to create a “collaborative clearinghouse” that could facilitate federal oversight of interoperable communications funding to jurisdictions and allow states access to this information for planning purposes. The database is intended to decrease duplication of funding and evaluation efforts, de-conflict the application process, maximize efficiency of limited federal funding, and serve as a data collection tool for lessons learned that would be accessible to state and locals. However, SAFECOM officials said that the challenge to implementing the coordinated project is getting federal agency collaboration and compliance. As of February 2004, the database contained award information from the 2003 COPS and FEMA interoperability communications equipment grants, but no others within or outside DHS. SAFECOM’s oversight authority and responsibilities are dependant upon its overall mission. OMB officials told us that they are currently in the process of refocusing the mission of the SAFECOM program into three specific parts: (1) coordination of federal activities through several initiatives, including participation in the Federal Interagency Coordination Council and establishment of a process for federal agencies to report and coordinate with SAFECOM on federal activities and investments in interoperability; (2) developing standards; and (3) developing a national architecture for addressing communications interoperability problems. They said identification of all current and planned federal agency communications programs affecting federal, state, and local wireless interoperability is difficult. According to these officials, OMB is developing a strategy to best utilize the SAFECOM program and examining options to enforce the new coordination and reporting process. SAFECOM officials said they are working to formalize the new reporting and coordination process by developing written agreements with other federal agencies and by obtaining concurrence of major state and local associations to the SAFECOM governance structure. SAFECOM officials noted that this newly refocused SAFECOM role does not include providing technical assistance or conducting operational testing of equipment. They said that their authority to conduct such activities will come from DHS enabling directives. SAFECOM officials also said that they have no enforcement authority to require other agencies to use the SAFECOM grant guidance in their funding decisions or to require agencies to provide grant program information to them for use in their database. State and Local Governments Can Play a Central Role States, with broad input from local governments, can serve as focal points for statewide planning to improve interoperable communications. The FCC has recognized the important role of states. In its rules and procedures, the FCC concluded that because states play a central role in managing emergency communications and are usually in control at large scale-events and disasters, states should administer the interoperability channels within the 700 MHz band of communications spectrum. States can play a key role in improving interoperable communications by establishing a management structure that includes local participation and input to analyze and identify interoperability gaps between “what is” and “what should be,” developing comprehensive local, state, and regional plans to address such gaps, and funding these plans. 
The states we visited or contacted—California, Florida, Georgia, Missouri, Washington and a five state Midwest consortium—were in various stages of formulating these management structures. However, states are not required to establish a statewide management structure or to develop interoperability plans, and there is no clear guidance on what should be included in such plans. In addition, no requirement exists that interoperability of federal communications systems be coordinated with state and local government communications systems. The use of a standard database on communications frequencies by public safety agencies within the state and common terminology for these frequencies in preparation and implementation of these statewide interoperable plans are essential but are also not required. Without planning, coordination, and applicable standards—in other words, without a commonly understood and accepted blueprint or national architecture—the communications systems developed between and among locations and levels of government may not be interoperable. States are key players in responding to normal all-hazards emergencies and to terrorist threats. Homeland Security Presidential Directive 8 notes that awards to states are the primary mechanism for delivery of federal preparedness assistance for these missions. State and local officials also believe that states, with broad local and regional participation, have a key role to play in coordinating interoperable communications supporting these missions. The Public Safety Wireless Network (PSWN), in its report on the role of the state in providing interoperable communications, agreed. According to the PSWN report, state leadership in public safety communications is key to outreach efforts that emphasize development of common approaches to regional and statewide interoperability. The report said that state officials have a vested interest in establishing and protecting statewide wireless infrastructures because public safety communications often must cross more than one local jurisdictional boundary. However, states are not required to establish a statewide capability to (1) integrate statewide and regional interoperability planning and (2) prepare statewide interoperability plans that maximize use of spectrum to meet interoperability requirements of day-to-day operations, joint task force operations, and operations in major events. Federal, state, and local officials are not required to coordinate federal, state, and local interoperability spectrum resources that, if successfully addressed, have significant potential to improve public safety wireless communications interoperability. As a result, states may not prepare comprehensive and integrated statewide plans that address the specific interoperability issues present in each state across first responder disciplines and levels of government. Several state and local agencies that we talked with emphasized that they are taking steps to address the need for statewide communications planning. State officials also told us that statewide interoperability is not enough because incidents first responders face could cross state boundaries. Thus, some states are also taking actions to address interstate interoperability problems. For example, Illinois, Indiana, Kentucky, Michigan, and Ohio officials said that their states have combined efforts to form the Midwest Public Safety Communications Consortium to promote interstate interoperability. 
According to these officials, they also have taken actions to form an interstate committee to develop interoperability plans and solicit support from key players, such as local public safety agencies. Statewide Interoperable Communications Committees Offer Potential for Coordinated Statewide Planning FCC recognized a strong state interest in planning and administering interoperability channels for public safety wireless communications when it adopted various technical and operational rules and polices for the 700 MHz band. In these rules and policies, FCC concluded that administration of the 2.6 MHz of interoperability channels in that band (approximately 10 percent) should occur at the state-level in a State Interoperability Executive Committee (SIEC). FCC said that states play a central role in managing emergency communications and that state-level organizations are usually in control at large-scale events and disasters or multi-agency incidents. FCC also found that states are usually in the best position to coordinate with federal government emergency agencies. FCC said that SIEC administrative activities could include holding licenses, resolving licensing issues, and developing a statewide interoperability plan for the 700 MHz band. Other SIEC responsibilities could include the creation and oversight of incident response protocols and the creation of chains of command for incident response and reporting. Available data indicate that 12 to 15 states did not create SIECs but have relied on Regional Planning Committees or similar planning bodies. Content and Scope of Statewide Interoperability Plans Not Established A comprehensive statewide interoperable plan can provide the guiding framework for achieving defined goals for interoperability within a state and for regions within and across states (such as Kansas City, Mo and Kansas City, Kans.). NCC recommended that all SIECs prepare an interoperability plan that is filed with FCC and updated when substantive changes are made or at least every three years. NCC also recommended to FCC that SIECs, for Homeland Security reasons, should administer all interoperability channels in a state, not merely those in the 700 MHz band. According to NCC, each state should have a central point identified for information on a state’s interoperability capability. None of the four states we visited had finished preparation and funding of their state interoperability plans. Washington and Florida were preparing statewide interoperability plans at the time we visited. Georgia officials said they have a state interoperability plan but that it is not funded. However, one other state we contacted, Missouri, has extended SIEC responsibility for interoperability channels beyond the 700 MHz band. The Missouri SIEC has also designated standard operational and technical guidelines as conditions for the use of these bands. SIEC requires applicants to sign a MOU agreeing to these conditions in order to use these channels in the state of Missouri. The Missouri SIEC Chairman said the state developed its operational and technical guidelines because FCC had not established its own guidelines for these interoperability channels in the VHF and UHF bands. The chairman said Missouri borders on eight other states and expressed concern that these states will develop different guidelines that are incompatible with the Missouri guidelines. He said FCC was notified of Missouri’s actions but has not taken action to date. 
In another example, California intends to prepare a statewide interoperability plan. California’s SIEC is re-examining California’s previous stove piped programs of communications interoperability (separate systems for law enforcement, fire, etc.) in light of the need to maintain tactical channels within disciplines while promoting cross-discipline interoperability. Coordination of Federal and State Interoperable Frequencies in Statewide Plans FCC designated frequency coordinators told FCC that planning for interoperability channels should include federal spectrum designated for interoperability with state and local governments. We found several examples in our field work that support inclusion of federal agencies in future state and local planning for interoperable communications. For example, a Washington State official told us that regional systems within the state do not have links to federal communications systems and assets. In another example, according to an emergency preparedness official in Seattle, a study of radio interoperable communications in a medical center also found that federal agencies such as FBI are not integrated into hospital or health communications systems, and other federal agencies have no radio infrastructure to support and participate in a health emergency such as a bio-terrorism event. He told us that he has no idea what the federal communications plan is in the event of a disaster; he said he does not know how to talk to federal health officials responding to an incident or what the federal government needs when they arrive. The federal government is developing a system that could improve interoperable communications on a limited basis between state and federal government agencies. The Integrated Wireless Network (IWN) is a radio system that is intended to replace the existing radio systems for the DOJ, Treasury, and DHS. IWN is an exclusive federal law enforcement communications system that is intended to interact and interface with state and local systems as needed but will not replace these systems. According to DOJ officials, IWN is intended to improve federal to state/ local interoperability but will not address interoperability of state and local systems. However, federal interoperability with state and local wireless communications systems is hindered because NTIA and FCC control different frequencies in the VHF and UHF bands. To enhance interoperability, NTIA has identified 40 federal government frequencies that can be used by state and local public safety agencies for joint law enforcement and incident response purposes. FCC, however, designated different frequencies for interoperability in the VHF band and in the UHF band from spectrum it controls for use by state and local public safety agencies. Federal Grant Structure Does Not Support Statewide Planning Total one-time replacement of the nation’s communications systems is very unlikely, due to the costs involved. A 1998 study cited the replacement value of the existing public safety communication infrastructure nationwide at $18.3 billion. DHS officials said this estimate is much higher when infrastructure and training costs are taken into account. Furthermore, DHS recently estimated that reaching an accelerated goal of communications interoperability will require a major investment of several billion dollars within the next 5 to 10 years. As a result of these extraordinary costs, federal funding is but one of several resources state and local agencies must use in order to address these costs. 
Furthermore, given the high costs, the development of an interoperable communications plan is vital to useful, non-duplicative spending. However, the federal funding assistance programs to state and local governments do not fully support regional planning for communications interoperability. Federal grants that support interoperability have inconsistent requirements to tie funding to interoperable communications plans. In addition, uncoordinated federal and state level grant reviews limit the government’s ability to ensure that federal funds are used to effectively support improved regional and statewide communications systems. Local, state and federal officials agree that regional communications plans should be developed to guide decisions on how to use federal funds for interoperable communications; however, the current funding requirements do not support this planning process. Although recent grant requirements have encouraged jurisdictions to take a regional approach to planning, current federal first responder grants are inconsistent in their requirements to tie funding to interoperable communications plans. States and locals are not required to provide an interoperable communications plan as a prerequisite to receiving some federal grant funds. As a result, there is no assurance that federal funds are being used to support a well- developed strategy for improving interoperability. For example, the fiscal year 2004 Homeland Security Grant (HSG) and Urban Areas Security Initiative (UASI) grants require states or selected jurisdictions to conduct a needs assessment and submit a Homeland Security Strategy to ODP. However, the required strategies are high-level and broad in nature. They do not require that project narratives or a detailed communications plan be submitted by grantees prior to receiving grant funds. In another example, fiscal year 2003 funding provided by COPS and FEMA for the Interoperable Communications Equipment Grants did not require that a communications plan be completed prior to receiving grant funds. However, grantees were required to provide documentation that they were actively engaged in a planning process and a multi-jurisdictional and multidisciplinary project narrative was required. In addition to variations in requirements to create communications interoperability plans, federal grants also lack consistency in defining what “regional” body should conduct planning. Grant Submissions and Performance Period Time Frames Also Present Challenges to Short- and Long-Term Planning State and local officials also said that the short grant application deadlines for recent first responder grants limited their ability to develop cohesive communications plans or perform a coordinated review of local requests. Federal officials acknowledged that the limited submission timeframes presents barriers to first responders for developing plans prior to receiving funds. For example, several federal grant programs—the Homeland Security Grant, UASI grant, COPs and FEMA communication equipment grants, Assistance to Firefighters Grant—allow states only 30 or 60 days from the date of grant announcement to submit a grant proposal. These time frames are sometimes driven by appropriations language or by the timing of the appropriations enactment. Furthermore, many grants have been awarded to state and locals for communications interoperability that have 1- or 2-year performance periods, and according to state and local officials, do not support long-term solutions. 
For example, Assistance to Fire Fighters Grants, COPS/ FEMA’s Interoperable Communications Equipment Grants, and National Urban Search and Rescue grants all have 1-year performance periods. UASI, HSG program, and Local Law Enforcement Block Grants have 2-year performance periods. No Coordinated Federal or State Grant Review Exists to Ensure Funds are Used to Improve Regional or Statewide Communications Interoperability The federal and state governments lack a coordinated grant review process to ensure that funds allocated to local governments are used for communication projects that complement each other and add to overall statewide and national interoperability. Federal and state officials said that each agency reviews its own set of applications and projects, without coordination with other agencies. As a result, grants could be given to bordering jurisdictions that propose conflicting interoperability solutions. In fiscal year 2003, federal officials from COPS and FEMA attempted to eliminate awarding funds to conflicting communication systems within bordering jurisdictions by coordinating their review of interoperable communications equipment grant proposals. However, COPS and FEMA are only two of several federal sources of funds for communications interoperability. In an attempt to address this challenge, in 2003 SAFECOM coordinated with other agencies to create the document Recommended Federal Grant Guidance, Public Safety Communications and Interoperability Grants, which lays out standard grant requirements for planning, building, and training for interoperable communications systems. The guidance is designed to advise federal agencies on who is eligible for the first responder interoperable communications grants, the purposes for which grant funds can be used, and eligibility specifications for applicants. The guidance recommends standard minimum requirements, such as requirements to “…define the objectives of what the applicant is ultimately trying to accomplish and how the proposed project would fit into an overall effort to increase interoperability, as well as identify potential partnerships for agreements.” Additionally, the guidance recommends, but does not require, that applicants establish a governance group consisting of local, tribal, state, and federal entities from relevant public safety disciplines and purchase interoperable equipment that is compliant with phase one of Project-25 standards. The House Committee on Appropriations report for the DHS FY 2004 appropriation states that the Committee is aware of numerous federal programs addressing communications interoperability through planning, building, upgrading, and maintaining public safety communication systems, among other purposes. The Committee directed that all DHS grant programs issuing grants for the above purposes incorporate the SAFECOM guidance and coordinate with the SAFECOM program when awarding funding. To better coordinate the government’s efforts, the Committee also encouraged all other federal programs issuing grants for the above purposes to use the guidelines outlined by SAFECOM in their grant programs. However, SAFECOM officials said that they have no enforcement authority to require other agencies to use this guidance in their funding decisions or to require agencies to provide grant program information to them for use in their database. 
Conclusions A fundamental barrier to successfully addressing interoperable communications problems for public safety has been the lack of effective, collaborative, interdisciplinary, and intergovernmental planning. Jurisdictional boundaries and unique public safety agency missions have often fostered barriers that hinder cooperation and collaboration. No one first responder agency, jurisdiction, or level of government can “fix” the nation’s interoperability problems, which vary across the nation and often cross first responder agency and jurisdictional boundaries. Changes in spectrum available to federal, state and local public safety agencies— primarily a federal responsibility conducted through the FCC and NTIA— changes in technology, and the evolving missions and responsibilities of public safety agencies in an age of terrorism all highlight the ever-changing environment in which interoperable communications needs and solutions must be addressed. Interdisciplinary, intergovernmental, and multi- jurisdictional partnership and collaboration are essential for effectively addressing interoperability shortcomings. Recommendations We are making recommendations to DHS and OMB to improve the assessment and coordination of interoperable communications efforts. We recommend that the Secretary of DHS: in coordination with the FCC and National Telecommunications and Information Administration, continue to develop a nationwide database of public safety frequency channels and a standard nationwide nomenclature for these channels, with clear target dates for completing both efforts; establish requirements for interoperable communications and assist states in assessing interoperability in their states against those requirements; through DHS grant guidance encourage states to establish a single, statewide body to assess interoperability and develop a comprehensive statewide interoperability plan for federal, state, and local communications systems in all frequency bands; and at the appropriate time, require through DHS grant guidance that federal grant funding for communications equipment shall be approved only upon certification by the statewide body responsible for interoperable communications that grant applications for equipment purchases conform with statewide interoperability plans. We also recommend that the Director of OMB, in conjunction with DHS, review the interoperability mission and functions now assigned to SAFECOM and establish those functions as a long-term program with adequate authority and funding. In commenting on a draft of this report, the Department of Homeland Security discusses actions the department is taking that are generally consistent with the intent of our recommendations but do not directly address specific steps detailed in our recommendations with respect to establishment of statewide bodies responsible for interoperable communications within the state, the development of comprehensive statewide interoperability plans and tying federal funds for communications equipment directly to those statewide interoperable plans. OMB did not provide written comments on the draft report. This concludes my prepared statement, Mr. Chairman, and I would be pleased to answer any questions you or other members of the Subcommittee my have at this time. This is a work of the U.S. government and is not subject to copyright protection in the United States. It may be reproduced and distributed in its entirety without further permission from GAO. 
However, because this work may contain copyrighted images or other material, permission from the copyright holder may be necessary if you wish to reproduce this material separately.
Lives of first responders and those whom they are trying to assist can be lost when first responders cannot communicate effectively as needed. This report addresses issues of determining the status of interoperable wireless communications across the nation, and the potential roles that federal state, local governments can play in improving these communications. In a November 6, 2003, testimony, GAO said that no one group or level of government could "fix" the nation's interoperable communications problems. Success would require effective, collaborative, interdisciplinary and intergovernmental planning. The present extent and scope nationwide of public safety wireless communication systems' ability to talk among themselves as necessary and authorized has not been determined. Data on current conditions compared to needs are necessary to develop plans for improvement and measure progress over time. However, the nationwide data needed to do this are not currently available. The Department of Homeland Security (DHS) intends to obtain this information by the year 2005 by means of a nationwide survey. However, at the time of our review, DHS had not yet developed its detailed plans for conducting this survey and reporting its results. The federal government can take a leadership role in support of efforts to improve interoperability by developing national requirements and a national architecture, developing nationwide databases, and providing technical and financial support for state and local efforts to improve interoperability. In 2001, the Office of Management and Budget (OMB) established the federal government's Wireless Public Safety Interoperable Communications Program, SAFECOM, to unify efforts to achieve national wireless communications interoperability. However, SAFECOM's authority and ability to oversee and coordinate federal and state efforts has been limited by its dependence upon other agencies for funding and their willingness to cooperate. OMB is currently examining alternative methods to implement SAFECOM's mission. In addition, DHS, where SAFECOM now resides, has recently announced it is establishing an Office for Interoperability and Compatibility to coordinate the federal response to the problems of interoperability in several functions, including wireless communications. The exact structure and funding for this office, which will include SAFECOM, are still being developed. State and local governments can play a large role in developing and implementing plans to improve public safety agencies' interoperable communications. State and local governments own most of the physical infrastructure of public safety communications systems, and states play a central role in managing emergency communications. The Federal Communications Commission recognized the central role of states in concluding that states should manage the public safety interoperability channels in the 700 MHz communications spectrum. States, with broad input from local governments, are a logical choice to serve as a foundation for interoperability planning because incidents of any level of severity originate at the local level with states as the primary source of support. However, states are not required to develop interoperability plans, and there is no clear guidance on what should be included in such plans.
gov_report
Summarize: RELATED APPLICATIONS [0001] This application claims the benefit of Japanese Application No. JP2002-361900, filed Dec. 13, 2002, which is incorporated by reference herein in its entirety. TECHNICAL FIELD OF THE INVENTION [0002] The present invention relates to a film forming agent for coating plants and a method for supplying ingredients to plants using such film forming agent. BACKGROUND [0003] For plants such as vegetables and fruits, the proper taste and flavor of the plant is only obtained when a sufficient amount of the nutrients appropriate for the respective plant have been supplied in its growth process. [0004] Currently, the appropriate nutrients are obtained from the soil where plants are cultivated. However, the soil has a tendency to become barren because of continuous cultivation and deterioration of land environment, etc., and it is becoming more difficult to supply sufficient amounts of such appropriate nutrients in recent years. For example, these days the soil often lacks proper mineral ingredients and therefore the taste of vegetables and fruits often lacks flavor and sweetness. In addition, nutrients supplied from compost don&#39;t always supplement the soils adequately. [0005] Thus, to supply mineral nutrients to plants, one conventional method is to pump up deep-sea water, dilute it with water, and spray it over the surface of plant leaves, thereby directly supplying mineral nutrients to the leaf surfaces and allowing them absorb mineral ingredients. However, the minerals supplied by deep-sea water diluted and sprayed onto leaves using this method is dissolved by rainwater and washed away, and therefore spraying of the deep-sea water is necessary every time it rains, which is troublesome and costly. [0006] Another method is to directly spray mineral nutrients over the soil and let plants absorb them from their roots. However, the supply of mineral nutrients to the roots has only a limited effect and has the same problem that they are easily washed away with rainwater. [0007] In addition, agricultural chemicals are sprayed to exterminate harmful insects, but such agricultural chemicals stick to plants and residual agricultural chemicals remain even after water washing plants, which is harmful not only to consumers but also agricultural producers and workers, etc. The use of agricultural chemicals by agricultural producers and workers are harmful to health particularly in the case of growing plants in greenhouse because it involves work in a closed room. Furthermore, the use of agricultural chemicals is harmful to health of not only agricultural producers and workers but also people in general when agricultural chemicals are applied to plants for public facilities such as street trees and park trees, etc. [0008] Thus there is a long felt need for a method of supplying mineral and other nutrients to plants in a manner that is not easily washed away by rain water. In addition, it would be beneficial if such method also limited or prevented insect damage to plants thus decreasing or even elimination the need for harmful agricultural chemicals such as pesticides. SUMMARY OF THE INVENTION [0009] The present invention meets the above long felt needs. 
As discussed above, to supply mineral ingredients to plants such as vegetables and fruits, the conventional methods include diluting deep-sea water containing sufficient mineral ingredients with water and then spraying it over leaf surfaces of plants or cultivating plants with soil containing added mineral ingredients, but the added mineral ingredients are washed away with rainwater and it is therefore necessary to spray deep-sea water after every time it rains, which is troublesome, takes a lot of man-hours and increases costs in aspects of labor efficiency and workability. The present invention solves this by supplying minerals to plant such that the minerals are not easily washed away. [0010] Moreover, agricultural chemicals are sprayed to exterminate harmful insects, but agricultural chemicals stick to plants and residual agricultural chemicals remain even after water washing plants, which is harmful not only to consumers but also to agricultural producers and workers, etc. The use of agricultural chemicals by agricultural producers and workers are harmful to health particularly in the case of growing plants in greenhouse because it involves work in a closed room. Furthermore, the use of agricultural chemicals is harmful to health of not only agricultural producers and workers but also people in general when agricultural chemicals are applied to plants for public facilities such as street trees and park trees, etc. Aspects of the present invention address these problems by providing an alternative means of protecting plants from insects that lowers exposure of humans to harmful agricultural chemicals. [0011] Thus it is an object of the present invention to provide a film forming agent for coating plants and a method for supplying ingredients to plants capable of supplying necessary ingredients to plants easily and reliably. [0012] It is another object of the present invention to provide a film forming agent for coating plants and a method for supplying ingredients to plants to make sure that damages to plants by harmful insects are prevented. [0013] It is a further object of the present invention to provide a film forming agent for coating plants and a method for supplying ingredients to plants which is absolutely harmless to the human body, capable of not only supplying necessary ingredients to plants easily and reliably but also making sure that damages to plants by harmful insects are prevented. [0014] In order to solve the above-described problems, the present invention provides a film forming agent for coating plants and a method for supplying nutrients to plants have the following characteristic configuration. One aspect of the present invention is a film forming agent for coating plants comprising cellulose which includes at least one of mineral ingredient and saccharide ingredient. In one embodiment, the mineral ingredient includes citric acid. [0015] In another embodiment, the film forming agent comprises a basic ingredient made of 2.5 to 3.5 weight percentage of hydroxypropylmethyl cellulose with a hydroxypropyl base of 4 to 12% by weight of the hydroxypropylmethyl cellulose and 96.5 to 97.5 weight percentage of water; a mineral content of 2.5 parts of the ingredient; and polysaccharide (sugar) content 5.0 parts of the solution 100 parts. In yet another embodiment, the mineral ingredient is composed of 40 parts natural salt, 60 parts natural magnesium chloride, and 3.0 part trisodium citrate. 
In still another embodiment, the polysaccharide is any one of sugar, fructose, maltose or glucose. In another embodiment, the film forming agent for coating plants comprises 2.5 to 3.5 parts hydroxypropylmethyl cellulose, 96.5 to 97.5 parts water; and 2 parts trisodium citrate. [0016] Another aspect of the present invention includes a method for supplying ingredients to plants by spraying any one of the above-described agents forming film coating plants over plants and thereby forming a cellulose film on the surface of said plants. DETAILED DESCRIPTION OF THE INVENTION [0017] The configurations and operations of embodiments of the film forming agent for coating plants and the method for supplying ingredients to plants according to the present invention will be explained in detail below. [0018] The film forming agent for coating plants and the method for supplying ingredients to plants form a thin cachet film (hereinafter referred to as “cell coat”) including ingredients necessary for a plant on the surface of plants by means of spraying, etc., and thereby directly supply the above-described necessary ingredients from the surface of the plant to the plant through the cell coat. The cell coat is made of a material used for medicine that is harmless to the human body, which is hardly washed away even if gotten wet with rain and therefore requires no repeated spraying every time it rains and provides excellent workability. [0019] In one embodiment, the film forming agent for coating plants (cellulose film forming agent) including the above-described cell coat according to the present invention is basically composed of hydroxypropylmethyl cellulose, sodium chloride, magnesium chloride and sodium citrate as principal ingredients, and may be mixed with approximately 70 kinds of other trace minerals and polysaccharides. [0020] A more specific composition of the film forming agent for coating plants according to the present invention is as in the following example: [0021] (1) A basic ingredient made of 2.5 to 3.5 weight percentage of hydroxypropylmethyl cellulose whose hydroxypropyl base is 4 to 12% by weight of the hydroxypropylmethyl cellulose and 96.5 to 97.5 weight percentage of water; [0022] mineral ingredient representing 2.5 percent by weight of the above-described basic ingredient; and [0023] polysaccharide (sugar) representing 5.0 percent by weight of the above-described basic ingredient [0024] is a preferred example of the film forming agent for coating plants. [0025] In one embodiment, the composition of the above-described mineral ingredient is: [0026] 40 parts natural salt; [0027] 60 parts natural magnesium chloride; and [0028] 3.0 parts trisodium citrate [0029] as a preferred example. [0030] In certain embodiments, the polysaccharide may be selected from the following group sugar, fructose, maltose or glucose, etc. [0031] Here, natural magnesium chloride is preferred as the mineral ingredient and the trisodium citrate acts as a catalyst to promote absorption at the surface of the plant. [0032] Another example of the present invention is as follows: [0033] (2) The film forming agent for coating plants is composed of: [0034] hydroxypropylmethyl cellulose: 2.5 to 3.5 parts [0035] water: 96.5 to 97.5 parts [0036] trisodium citrate: 2.0 parts [0037] Numerous suitable methods for forming the cell coat with the compositions of the present invention. 
By way of example, it is possible to use a spraying method utilizing a mechanical power sprayer, power sprayer, fixed power sprayer or knapsack power sprayer. The configuration of the film forming agent for coating plants used for spraying with each machine is as follows: [0038] 1. Mechanical power sprayer (SS): [0039] Film forming agent for coating plants in above example (1) or (2): 20 kg [0040] Water 500 l sprayed over 10 acres [0041] 2. Power sprayer: [0042] Film forming agent for coating plants in above example (1) or (2): 12 kg [0043] Water 300 l sprayed over 10 acres [0044] 3. Fixed power sprayer: [0045] Film forming agent for coating plants in above example (1) or (2): 8 kg [0046] Water 200 l sprayed over 10 acres [0047] 4. Knapsack power sprayer: [0048] Film forming agent for coating plants in above example (1) or (2): 4 kg [0049] Water 100 l sprayed over 10 acres [0050] Spraying and forming the cellulose film forming agent with the above-described configurations according to the present invention over the surface of a plant allows the surfaces of leaves and fruits of the plant to be wrapped with the cellulose film forming agent including a mineral ingredient and saccharide and allows the mineral ingredient and saccharide, etc., to be absorbed from the surfaces of leaves for a long stretch of time. This cellulose film forming agent forms film a on the surfaces of leaves and fruits in close contact with cell membranes, and can thereby protect the plant. [0051] This cellulose film forming agent has excellent weather resistance and durability, and is hardly washed away with rainwater, tasteless, odor-free, colorless, and harmless and provides excellent workability. The film typically has a thickness of 5 to 20 microns, which is made variable depending on the plant in such a way that it is thicker for those with permeability and thinner for those with less permeability. Mineral ingredients and saccharides are easily dissolved into water and therefore are easily washed away from leaves, but the use of them together with the cellulose film forming agent of the present invention allows for durable application of the mineral ingredients and saccharaides and can show significant effects on the plants even with a small amount thereof. [0052] Furthermore, since the cellulose film forming agent of the present invention forms a film on the surface of the plant, the plant is more resistant to viral infections and less susceptible to damage by harmful insects. [0053] Furthermore, the present invention has no residual agricultural chemicals and allows production of plants free of or with a reduced amount of agricultural chemicals and moreover this cellulose film is totally harmless even if it enters the human body through the mouth and is never washed away with rainwater. The present invention makes it possible for the plant to absorb minerals and saccharides, etc., through the cellulose film forming agent and thereby obtain plants which are strong, resistant to diseases and with a pleasant taste, and improve sugar content easily. [0054] It has been experimentally confirmed that forming a film on plants by spraying the cellulose film forming agent including various ingredients according to the present invention provides various effects as will be described below. [0055] It has been experimentally confirmed that including citric acid in minerals improves the absorbency of minerals by a plant and activates the plant, refreshes the leaf colors, increases chlorophyll and deepens the colors. 
[0056] Being coated with the cellulose film prevents evaporation of water content, last long and up taste sweet of vegetables and fruits. It has been experimentally confirmed that the presence of the cellulose prevents minerals and saccharides from being washed away with rainwater. [0057] Though bagging cultivation is used for many fruit trees, the coating with cellulose eliminates the need for it and further allows the plant to catch sunlight directly and thereby have a improve color and gross. It has been experimentally confirmed that light can be shielded by coating the plant with colored cellulose and wash it away with water and vinegar. [0058] In addition to the above-described effects, the following effects against harmful insects, insects and disease-causing germs can also be obtained: [0059] It has been experimentally confirmed that a hydroxypropyl base ranging from 4 to 12% by weight of the hydroxypropylmethyl cellulose can serve as a repellent against aphids, whiteflies, citrus red mites, red spiders, T.kanzawai (tea), etc. [0060] It has been experimentally confirmed that including mineral ingredients in the cellulose film has the bacteriostatic effect on disease-causing germs. [0061] It has been experimentally confirmed that including mineral ingredients in the cellulose film has the effect of repelling harmful insects. [0062] It has been experimentally confirmed that the cellulose film stops up pores of insects, causes them to have difficulty in breathing and prevents them from contacting leaves, etc., dropping off the leaves in approximately 3 to 7 minutes. This can avoid using agricultural chemicals for plants in public facilities such as street trees and park trees, making them completely harmless. [0063] It has been experimentally confirmed that being coated with the cellulose film prevents insects from inserting their stylets into or pulling them out of plant tissue and thereby prevents insect damage. [0064] It has been experimentally confirmed that being coated with the cellulose film prevents action of insects from hatching and becoming larvae and also prevents them from developing from larvae to imagoes. [0065] It has been experimentally confirmed that being coated with the cellulose film prevents infection by means of spores which are sources of infection and stay above the film. The coating reduces humidity, increases resistance to diseases, thus preventing diseases from spreading. [0066] Coating with the cellulose film and minerals causes disease-causing germs to be disinfected with minerals and lose their functions. Coating over an extended time period has a great effect. [0067] The effects of the present invention in an economical aspect include its advantage in labor efficiency and workability as it reduces the number of times spraying is required from 5 to 15 times in the conventional method for exterminating harmful insects to 2 to 5 times, 1/3 of the conventional one. [0068] Furthermore, compared to conventional agricultural chemicals, the present invention could reduce the total cost of materials used by 10 to 30%. [0069] The effects of the present invention in an environmental aspect include its harmlessness to workers as opposed to the use of agricultural chemicals which is harmful to health of agricultural producers and workers. Moreover, the health of consumers is also protected because there are no residual agricultural chemicals in harvested farm products and fruits. 
[0070] The effects of the present invention in a social aspect include its ability to make completely harmless plants for public facilities such as street trees and park trees, etc., to which agricultural chemicals have been conventionally applied, thus improving the environment, protecting nature and having no adverse effects on atopic people or people allergic to chemical substances. [0071] The configurations and operations of the preferred embodiments of the present invention have been described in detail so far. However, these embodiments are only illustrative examples of the present invention and do not limit the present invention. It is easily understandable to those skilled in the art that the present invention can be modified in various manners according to particular applications without departing from the spirit of the present invention or essential characteristics thereof. [0072] As described above, the film forming agent for coating plants and the method for supplying ingredients to plants of the present invention make it possible not only to supply necessary ingredients to plants easily and reliably but also to harmlessly and reliably prevent damage to plants caused by harmful insects. That is, it allows plants such as vegetables and fruits to easily absorb ingredients such as minerals and saccharides through the surfaces of leaves by a means that is not easily washed away by rainwater and easily supplies those ingredients, which promotes quality improvement and growth of plants. Furthermore, the present invention can eliminate damage to health of agricultural producers and workers caused by agricultural chemicals as measures against harmful insects and disease-causing germs.
The present invention provides compositions and methods of use and application of such compositions that not only ensure that necessary nutrients are supplied to plants but also harmlessly and reliably prevent damage to plants caused by harmful insects. The compositions include film forming agents for coating plants that include cellulose which contains at least one of a mineral ingredient and a saccharide ingredient, wherein the mineral ingredient preferably includes citric acid. This film forming agent for coating plants is sprayed over plants, and a cellulose film is thereby formed on the surfaces of the plant, supplying the nutrients to the plant and protecting the plant from insect damage.
big_patent
"[Scene: The Bay Mirror. Phoebe walks in and goes over to her assistant.]\nPhoebe: Good morning. Any(...TRUNCATED)
"Phoebe frees a genie, Jinny ( Saba Homayoon ), from a bottle, only to find that Jinny is a demon th(...TRUNCATED)
summ_screen_fd
"Write a title and summarize: Identification of functional genetic variation associated with increas(...TRUNCATED)
"DNA sequence variations (polymorphisms) that affect the expression levels of genes play important r(...TRUNCATED)
lay_plos
"Write a title and summarize: Regulation of rod gene expression has emerged as a potential therapeut(...TRUNCATED)
"There are several diseases that cause people to lose their eyesight and become blind. One of these (...TRUNCATED)
lay_elife
"Write a title and summarize: Neurocysticercosis is a disease caused by the oral ingestion of eggs f(...TRUNCATED)
"A method used to describe expressed genes at a specific stage in an organism is an EST library. In (...TRUNCATED)
lay_plos

Dataset Card for "summary-souffle"

DatasetDict({
    train: Dataset({
        features: ['text', 'summary', 'subset'],
        num_rows: 54087
    })
    validation: Dataset({
        features: ['text', 'summary', 'subset'],
        num_rows: 4262
    })
    test: Dataset({
        features: ['text', 'summary', 'subset'],
        num_rows: 4202
    })
})
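For orientation, here is a minimal sketch of loading these splits with the datasets library. The full repository id is not stated in this card, so the path below is a placeholder.

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the actual namespace of summary-souffle.
ds = load_dataset("your-namespace/summary-souffle")

print(ds)                    # DatasetDict with train / validation / test splits
print(ds["train"].features)  # columns: 'text', 'summary', 'subset'

example = ds["train"][0]
print(example["subset"])
print(example["summary"][:200])
```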

token counts

The train split contains 402.69M tokens in total, counted with the pszemraj/long-t5-tglobal-base-16384-book-summary tokenizer. Per-document token_len statistics for the train split:
         token_len
count      54087.0
mean   7445.306358
std    3868.060978
min          263.0
25%         4180.0
50%         6935.0
75%        10327.0
max        23926.0
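The counting script itself is not included in this card; a plausible way to reproduce the token_len statistics above with the named tokenizer is sketched below, again using a placeholder repo id.

```python
from datasets import load_dataset
from transformers import AutoTokenizer

ds = load_dataset("your-namespace/summary-souffle")  # placeholder repo id
tok = AutoTokenizer.from_pretrained("pszemraj/long-t5-tglobal-base-16384-book-summary")

def add_token_len(batch):
    # Tokenize the 'text' column without truncation and record the sequence length.
    ids = tok(batch["text"], truncation=False)["input_ids"]
    return {"token_len": [len(x) for x in ids]}

train = ds["train"].map(add_token_len, batched=True, batch_size=64)
print(f"{sum(train['token_len']) / 1e6:.2f}M tokens in train")
print(train.to_pandas()["token_len"].describe())
```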

v1

Documents per subset in the train split:
lay_plos          20789
multi_news        11708
big_patent         4164
gov_report         3514
summ_screen_fd     3449
billsum            2541
lay_elife          2528
booksum            2383
cnn_dailymail      1705
stacksmol           450
qmsum               396
squality            200
xlsum_en            118
worldbank            90
narrativeqa          49
dialogsum             3
Name: subset, dtype: int64
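Because every row carries its source corpus in the subset column, the mixture can be sliced per corpus. A small example, continuing from the loading sketch above:

```python
# Keep only the GAO report portion of the train split.
gov = ds["train"].filter(lambda ex: ex["subset"] == "gov_report")
print(len(gov))  # 3514 in v1, per the counts above
```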