
Commit

Chkhikvadze committed Jan 27, 2024
2 parents 32866ee + f28867d commit 313b739
Showing 17 changed files with 340 additions and 616 deletions.
1 change: 0 additions & 1 deletion docs/running_a_validator.md
@@ -88,7 +88,6 @@ pm2 start neurons/validators/api.py --interpreter /usr/bin/python3 --name valid
 - `--neuron.run_all_miner_syn_qs_interval`: Sets the interval, in seconds, for querying all miners with synthetic questions. Set to a positive value to enable. A value of 0 disables this feature.
 - `--reward.prompt_based_weight`: adjusts the influence of a scoring model that evaluates the accuracy and relevance of a node's responses to given prompts.
 - `--reward.prompt_summary_links_content_based_weight`: Specifies the weight for the reward model that evaluates the relevance and quality of summary text in conjunction with linked content data.
-- `--reward.rlhf_weight`: Weight for the rlhf reward model
 - `--neuron.only_allowed_miners`: A list of miner identifiers, hotkey
 - `--neuron.disable_twitter_links_content_fetch`: Enables the option to skip fetching content data for Twitter links, relying solely on the data provided by miners

18 changes: 18 additions & 0 deletions neurons/miners/config.py
@@ -80,6 +80,24 @@ def get_config() -> "bt.Config":
         default=True
     )
 
+    parser.add_argument(
+        "--miner.openai_summary_model",
+        default="gpt-3.5-turbo-1106",
+        help="OpenAI model used for summarizing content.",
+    )
+
+    parser.add_argument(
+        "--miner.openai_query_model",
+        default="gpt-3.5-turbo-1106",
+        help="OpenAI model used for generating queries.",
+    )
+
+    parser.add_argument(
+        "--miner.openai_fix_query_model",
+        default="gpt-4-1106-preview",
+        help="OpenAI model used for fixing queries.",
+    )
+
     # Adds subtensor specific arguments i.e. --subtensor.chain_endpoint ... --subtensor.network ...
     bt.subtensor.add_args(parser)
 
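Usage note: these dotted flags follow bittensor's nested-config convention, where bt.config exposes the parsed value as config.miner.openai_summary_model. A minimal standalone sketch of that behavior (hypothetical snippet; in the repo the parsing happens inside get_config()):

import argparse

parser = argparse.ArgumentParser()
# Same dotted-name convention as the flags added above.
parser.add_argument("--miner.openai_summary_model", default="gpt-3.5-turbo-1106")
args = parser.parse_args(["--miner.openai_summary_model", "gpt-4-1106-preview"])

# argparse keeps the dot in the attribute name, so plain attribute access
# does not work; getattr with the full dotted string does.
print(getattr(args, "miner.openai_summary_model"))  # -> gpt-4-1106-preview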
2 changes: 0 additions & 2 deletions neurons/miners/miner.py
@@ -48,8 +48,6 @@
 client = AsyncOpenAI(timeout=60.0)
 valid_hotkeys = []
 
-
-
 class StreamMiner(ABC):
     def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
         bt.logging.info("starting stream miner")
27 changes: 18 additions & 9 deletions neurons/miners/twitter_miner.py
@@ -42,15 +42,15 @@ def __init__(self, miner: any):
         self.miner = miner
 
     async def intro_text(self, model, prompt, send, is_intro_text):
-        bt.logging.info("miner.intro_text => ", self.miner.config.miner.intro_text)
-        bt.logging.info("Synapse.is_intro_text => ", is_intro_text)
+        bt.logging.trace("miner.intro_text => ", self.miner.config.miner.intro_text)
+        bt.logging.trace("Synapse.is_intro_text => ", is_intro_text)
         if not self.miner.config.miner.intro_text:
             return
 
         if not is_intro_text:
             return
 
-        bt.logging.info(f"Run intro text")
+        bt.logging.trace(f"Run intro text")
 
         content = f"""
         Generate introduction for that prompt: "{prompt}",
@@ -93,7 +93,7 @@ async def intro_text(self, model, prompt, send, is_intro_text):
                 }
             )
             await asyncio.sleep(0.1)  # Wait for 100 milliseconds
-            bt.logging.info(f"Streamed tokens: {joined_buffer}")
+            bt.logging.trace(f"Streamed tokens: {joined_buffer}")
             buffer = []
 
         return buffer
@@ -105,7 +105,12 @@ async def fetch_tweets(self, prompt):
             #todo we can find tweets based on twitter_query
             filtered_tweets = get_random_tweets(15)
         else:
-            tw_client = TwitterAPIClient()
+            openai_query_model = self.miner.config.miner.openai_query_model
+            openai_fix_query_model = self.miner.config.miner.openai_fix_query_model
+            tw_client = TwitterAPIClient(
+                openai_query_model=openai_query_model,
+                openai_fix_query_model=openai_fix_query_model
+            )
         filtered_tweets, prompt_analysis = await tw_client.analyse_prompt_and_fetch_tweets(prompt)
         return filtered_tweets, prompt_analysis
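The call site above only shows the keyword arguments; for illustration, a plausible shape of the constructor on the TwitterAPIClient side (an assumption — the class itself is not part of this diff and may differ):

class TwitterAPIClient:
    # Hypothetical sketch inferred from the call site above.
    def __init__(
        self,
        openai_query_model="gpt-3.5-turbo-1106",
        openai_fix_query_model="gpt-4-1106-preview",
    ):
        # Model used to turn the user prompt into a Twitter search query.
        self.openai_query_model = openai_query_model
        # Model used to repair queries that the Twitter API rejects.
        self.openai_fix_query_model = openai_fix_query_model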

@@ -167,8 +172,11 @@ async def twitter_scraper(self, synapse: TwitterScraperStreaming, send: Send):
         prompt = synapse.messages
         seed = synapse.seed
         is_intro_text = synapse.is_intro_text
-        bt.logging.info(synapse)
-        bt.logging.info(f"question is {prompt} with model {model}, seed: {seed}")
+        bt.logging.trace(synapse)
+
+        bt.logging.info("================================== Prompt ===================================")
+        bt.logging.info(prompt)
+        bt.logging.info("================================== Prompt ====================================")
 
         # buffer.append('Test 2')
         intro_response, (tweets, prompt_analysis) = await asyncio.gather(
@@ -188,7 +196,8 @@ async def twitter_scraper(self, synapse: TwitterScraperStreaming, send: Send):
         # else:
         #     synapse.set_tweets(tweets)
 
-        response = await self.finalize_data(prompt=prompt, model=model, filtered_tweets=tweets, prompt_analysis=prompt_analysis)
+        openai_summary_model = self.miner.config.miner.openai_summary_model
+        response = await self.finalize_data(prompt=prompt, model=openai_summary_model, filtered_tweets=tweets, prompt_analysis=prompt_analysis)
 
         # Reset buffer for finalizing data responses
         buffer = []
@@ -213,7 +222,7 @@ async def twitter_scraper(self, synapse: TwitterScraperStreaming, send: Send):
                         "more_body": True,
                     }
                 )
-                bt.logging.info(f"Streamed tokens: {joined_buffer}")
+                bt.logging.trace(f"Streamed tokens: {joined_buffer}")
                 buffer = []  # Clear the buffer for the next set of tokens
 
         joined_full_text = "".join(full_text)  # Join all text chunks
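The send(...) calls in this function follow the ASGI streaming pattern: tokens are buffered, flushed as http.response.body chunks with more_body=True, and a final chunk closes the stream. A self-contained sketch of that pattern (hypothetical names, not the repo's code):

import asyncio

async def stream_in_chunks(tokens, send, chunk_size=10):
    buffer = []
    for token in tokens:
        buffer.append(token)
        if len(buffer) == chunk_size:
            await send({
                "type": "http.response.body",
                "body": "".join(buffer).encode("utf-8"),
                "more_body": True,  # keep the HTTP response open
            })
            buffer = []
    # Flush any remainder and signal end-of-stream.
    await send({
        "type": "http.response.body",
        "body": "".join(buffer).encode("utf-8"),
        "more_body": False,
    })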
7 changes: 0 additions & 7 deletions neurons/validators/config.py
@@ -100,13 +100,6 @@ def add_args(cls, parser):
             default=0.05,
         )
 
-        parser.add_argument(
-            "--reward.rlhf_weight",
-            type=float,
-            help="Weight for the rlhf reward model",
-            default=DefaultRewardFrameworkConfig.rlhf_model_weight,
-        )
-
         parser.add_argument(
             "--reward.prompt_based_weight",
             type=float,
3 changes: 0 additions & 3 deletions neurons/validators/reward/config.py
@@ -24,7 +24,6 @@ class RewardModelType(Enum):
     sentence_match_penalty = "sentence_match_penalty"
     rlhf = "rlhf_reward_model"
     prompt = "prompt_reward_model"
-    dpo = "dpo_reward_model"
 
 class RewardScoringType(Enum):
     twitter_question_answer_score = "twitter_question_answer_score"
@@ -35,7 +34,5 @@ class DefaultRewardFrameworkConfig:
     """Reward framework default configuration.
     Note: All the weights should add up to 1.0.
     """
-    dpo_model_weight: float = 0
-    rlhf_model_weight: float = 0
     prompt_model_weight: float = 1
     prompt_model_summary_links_content_weight: float = 0
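Since the docstring requires all weights to add up to 1.0, deleting the dpo and rlhf entries leaves the prompt-based model carrying the full default weight. A quick illustrative check of that invariant (condensed from the class above):

class DefaultRewardFrameworkConfig:
    # Only the prompt-based weights remain after this commit.
    prompt_model_weight: float = 1
    prompt_model_summary_links_content_weight: float = 0

total = (DefaultRewardFrameworkConfig.prompt_model_weight
         + DefaultRewardFrameworkConfig.prompt_model_summary_links_content_weight)
assert abs(total - 1.0) < 1e-9  # the sum-to-1.0 invariant still holds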
150 changes: 0 additions & 150 deletions neurons/validators/reward/dpo.py

This file was deleted.

66 changes: 0 additions & 66 deletions neurons/validators/reward/open_assistant.py

This file was deleted.
